diff --git a/.gitignore b/.gitignore index 0dc7b4b0..cff8bf11 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ -/.vscode \ No newline at end of file +/.vscode +/frs \ No newline at end of file diff --git a/en/using-flowise/analytics/langwatch.md b/en/using-flowise/analytics/langwatch.md index c533cbcc..420a5281 100644 --- a/en/using-flowise/analytics/langwatch.md +++ b/en/using-flowise/analytics/langwatch.md @@ -1,2 +1,33 @@ +--- +description: Learn how to setup LangWatch to analyze and troubleshoot your chatflows and agentflows +--- + # LangWatch +--- + +[Langwatch](https://langwatch.ai) is a production-grade observability and LLMOps platform designed to monitor, debug, and enhance LLM applications and AI Agents at scale. + +## Setup + +1. At the top right corner of your Chatflow or Agentflow, click **Settings** > **Configuration** + +
Screenshot of user clicking in the configuration menu
+ +2. Then go to the Analyse Chatflow section + +
Screenshot of the Analyse Chatflow section with the different Analytics providers
+ +3. You will see a list of providers, along with their configuration fields. Click on LangWatch. + +
Screenshot of an analytics provider with credentials fields expanded
+ +4. If you haven't already, sign up for a free account [here](https://app.langwatch.ai) to get your API key. + +5. Fill in the configuration details, then turn the provider **ON** and click **Save** + +
Screenshot of analytics providers enabled
+ +6. You can now use LangWatch to analyze and troubleshoot your chatflows and agentflows. Refer to the [official guide](https://docs.langwatch.ai) for more details. + +
Screenshot of analytics providers enabled
diff --git a/fr/.gitbook/assets/1.jpg b/fr/.gitbook/assets/1.jpg new file mode 100644 index 00000000..1452a2b2 Binary files /dev/null and b/fr/.gitbook/assets/1.jpg differ diff --git a/fr/.gitbook/assets/2.png b/fr/.gitbook/assets/2.png new file mode 100644 index 00000000..aa69c6bc Binary files /dev/null and b/fr/.gitbook/assets/2.png differ diff --git a/fr/.gitbook/assets/3.png b/fr/.gitbook/assets/3.png new file mode 100644 index 00000000..d8f1242f Binary files /dev/null and b/fr/.gitbook/assets/3.png differ diff --git a/fr/.gitbook/assets/4.png b/fr/.gitbook/assets/4.png new file mode 100644 index 00000000..198cf347 Binary files /dev/null and b/fr/.gitbook/assets/4.png differ diff --git a/fr/.gitbook/assets/Agent Chatflow.json b/fr/.gitbook/assets/Agent Chatflow.json new file mode 100644 index 00000000..fb8301c6 --- /dev/null +++ b/fr/.gitbook/assets/Agent Chatflow.json @@ -0,0 +1,1179 @@ +{ + "nodes": [ + { + "width": 300, + "height": 554, + "id": "pinecone_0", + "position": { + "x": 416.0885364955418, + "y": -74.64623359488957 + }, + "type": "customNode", + "data": { + "id": "pinecone_0", + "label": "Pinecone", + "version": 2, + "name": "pinecone", + "type": "Pinecone", + "baseClasses": [ + "Pinecone", + "VectorStoreRetriever", + "BaseRetriever" + ], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "pineconeApi" + ], + "id": "pinecone_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pinecone_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": 
"pinecone_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pinecone_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pinecone_0-input-document-Document" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "Embeddings", + "id": "pinecone_0-input-embeddings-Embeddings" + } + ], + "inputs": { + "document": "", + "embeddings": "{{openAIEmbeddings_0.data.instance}}", + "pineconeIndex": "newindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"apple\"}", + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorStoreRetriever | BaseRetriever" + }, + { + "id": "pinecone_0-output-vectorStore-Pinecone|VectorStore", + "name": "vectorStore", + "label": "Pinecone Vector Store", + "type": "Pinecone | VectorStore" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 416.0885364955418, + "y": -74.64623359488957 + }, + "dragging": false + }, + { + "width": 300, + "height": 423, + "id": "openAIEmbeddings_0", + "position": { + "x": 54.119166092646566, + "y": -20.12821243199312 + }, + "type": "customNode", + "data": { + "id": "openAIEmbeddings_0", + "label": "OpenAI Embeddings", + "version": 2, + "name": "openAIEmbeddings", + "type": "OpenAIEmbeddings", + "baseClasses": [ + "OpenAIEmbeddings", + "Embeddings" + ], + "category": "Embeddings", + "description": "OpenAI API to generate embeddings for a given text", + "inputParams": [ + { 
+ "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "openAIEmbeddings_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbeddings_0-input-modelName-options" + }, + { + "label": "Strip New Lines", + "name": "stripNewLines", + "type": "boolean", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-stripNewLines-boolean" + }, + { + "label": "Batch Size", + "name": "batchSize", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-batchSize-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "text-embedding-ada-002", + "stripNewLines": "", + "batchSize": "", + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "name": "openAIEmbeddings", + "label": "OpenAIEmbeddings", + "type": "OpenAIEmbeddings | Embeddings" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 54.119166092646566, + "y": -20.12821243199312 + }, + "dragging": false + }, + { + "width": 300, + "height": 554, + "id": "pinecone_1", + "position": { + "x": 
428.41115568995156, + "y": 549.0169795435812 + }, + "type": "customNode", + "data": { + "id": "pinecone_1", + "label": "Pinecone", + "version": 2, + "name": "pinecone", + "type": "Pinecone", + "baseClasses": [ + "Pinecone", + "VectorStoreRetriever", + "BaseRetriever" + ], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "pineconeApi" + ], + "id": "pinecone_1-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pinecone_1-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pinecone_1-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. 
Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-lambda-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pinecone_1-input-document-Document" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "Embeddings", + "id": "pinecone_1-input-embeddings-Embeddings" + } + ], + "inputs": { + "document": "", + "embeddings": "{{openAIEmbeddings_1.data.instance}}", + "pineconeIndex": "newindex", + "pineconeNamespace": "pinecone-form10k-2", + "pineconeMetadataFilter": "{\"source\":\"tesla\"}", + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorStoreRetriever | BaseRetriever" + }, + { + "id": "pinecone_1-output-vectorStore-Pinecone|VectorStore", + "name": "vectorStore", + "label": "Pinecone Vector Store", + "type": "Pinecone | VectorStore" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 428.41115568995156, + "y": 549.0169795435812 + }, + "dragging": false + }, + { + "width": 300, + "height": 423, + "id": "openAIEmbeddings_1", + "position": { + 
"x": 58.45057557109914, + "y": 575.7733202609951 + }, + "type": "customNode", + "data": { + "id": "openAIEmbeddings_1", + "label": "OpenAI Embeddings", + "version": 2, + "name": "openAIEmbeddings", + "type": "OpenAIEmbeddings", + "baseClasses": [ + "OpenAIEmbeddings", + "Embeddings" + ], + "category": "Embeddings", + "description": "OpenAI API to generate embeddings for a given text", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "openAIEmbeddings_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbeddings_1-input-modelName-options" + }, + { + "label": "Strip New Lines", + "name": "stripNewLines", + "type": "boolean", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-stripNewLines-boolean" + }, + { + "label": "Batch Size", + "name": "batchSize", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-batchSize-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "text-embedding-ada-002", + "stripNewLines": "", + "batchSize": "", + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": 
"openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "name": "openAIEmbeddings", + "label": "OpenAIEmbeddings", + "type": "OpenAIEmbeddings | Embeddings" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 58.45057557109914, + "y": 575.7733202609951 + }, + "dragging": false + }, + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 825.5960565466753, + "y": 1212.2401709995304 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": [ + "BufferMemory", + "BaseChatMemory", + "BaseMemory" + ], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 825.5960565466753, + "y": 1212.2401709995304 + }, + "dragging": false + }, + { + "width": 300, + "height": 382, + "id": "openAIFunctionAgent_0", + "position": { + "x": 1461.716457981219, + "y": 547.2159602910168 + }, + "type": "customNode", + "data": { + "id": "openAIFunctionAgent_0", + "label": "OpenAI Function Agent", + "version": 3, + "name": "openAIFunctionAgent", + "type": "AgentExecutor", + "baseClasses": [ + "AgentExecutor", 
+ "BaseChain", + "Runnable" + ], + "category": "Agents", + "description": "An agent that uses Function Calling to pick the tool and args to call", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessage", + "type": "string", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "openAIFunctionAgent_0-input-systemMessage-string" + } + ], + "inputAnchors": [ + { + "label": "Allowed Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "openAIFunctionAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "openAIFunctionAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "OpenAI/Azure Chat Model", + "name": "model", + "type": "BaseChatModel", + "id": "openAIFunctionAgent_0-input-model-BaseChatModel" + } + ], + "inputs": { + "tools": [ + "{{retrieverTool_1.data.instance}}", + "{{retrieverTool_2.data.instance}}" + ], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatOpenAI_0.data.instance}}", + "systemMessage": "You are an expert financial analyst that always answers questions with the most relevant information using the tools at your disposal.\nThese tools have information regarding companies that the user has expressed interest in.\nHere are some guidelines that you must follow:\n* For financial questions, you must use the tools to find the answer and then write a response.\n* Even if it seems like your tools won't be able to answer the question, you must still use them to find the most relevant information and insights. 
Not using them will appear as if you are not doing your job.\n* You may assume that the users financial questions are related to the documents they've selected.\n* For any user message that isn't related to financial analysis, respectfully decline to respond and suggest that the user ask a relevant question.\n* If your tools are unable to find an answer, you should say that you haven't found an answer but still relay any useful information the tools found.\n* Dont ask clarifying questions, just return answer.\n\nThe tools at your disposal have access to the following SEC documents that the user has selected to discuss with you:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\n\nThe current date is: 2024-01-28" + }, + "outputAnchors": [ + { + "id": "openAIFunctionAgent_0-output-openAIFunctionAgent-AgentExecutor|BaseChain|Runnable", + "name": "openAIFunctionAgent", + "label": "AgentExecutor", + "type": "AgentExecutor | BaseChain | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "positionAbsolute": { + "x": 1461.716457981219, + "y": 547.2159602910168 + }, + "selected": false, + "dragging": false + }, + { + "width": 300, + "height": 781, + "id": "retrieverTool_2", + "position": { + "x": 798.3128281367018, + "y": -151.77659673435184 + }, + "type": "customNode", + "data": { + "id": "retrieverTool_2", + "label": "Retriever Tool", + "version": 2, + "name": "retrieverTool", + "type": "RetrieverTool", + "baseClasses": [ + "RetrieverTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": "Tools", + "description": "Use a retriever as allowed tool for agent", + "inputParams": [ + { + "label": "Retriever Name", + "name": "name", + "type": "string", + "placeholder": "search_state_of_union", + "id": "retrieverTool_2-input-name-string" + }, + { + "label": "Retriever Description", + "name": "description", + "type": "string", + "description": "When should agent uses to retrieve documents", + "rows": 3, + "placeholder": 
"Searches and returns documents regarding the state-of-the-union.", + "id": "retrieverTool_2-input-description-string" + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "retrieverTool_2-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Retriever", + "name": "retriever", + "type": "BaseRetriever", + "id": "retrieverTool_2-input-retriever-BaseRetriever" + } + ], + "inputs": { + "name": "search_apple", + "description": "Use this function to answer user questions about Apple Inc (APPL). It contains a SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period.", + "retriever": "{{pinecone_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "retrieverTool_2-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "retrieverTool", + "label": "RetrieverTool", + "type": "RetrieverTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 798.3128281367018, + "y": -151.77659673435184 + }, + "dragging": false + }, + { + "width": 300, + "height": 781, + "id": "retrieverTool_1", + "position": { + "x": 805.1192462354428, + "y": 479.4961512574057 + }, + "type": "customNode", + "data": { + "id": "retrieverTool_1", + "label": "Retriever Tool", + "version": 2, + "name": "retrieverTool", + "type": "RetrieverTool", + "baseClasses": [ + "RetrieverTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": "Tools", + "description": "Use a retriever as allowed tool for agent", + "inputParams": [ + { + "label": "Retriever Name", + "name": "name", + "type": "string", + "placeholder": "search_state_of_union", + "id": "retrieverTool_1-input-name-string" + }, + { + "label": "Retriever Description", + "name": "description", + "type": "string", + "description": 
"When should agent uses to retrieve documents", + "rows": 3, + "placeholder": "Searches and returns documents regarding the state-of-the-union.", + "id": "retrieverTool_1-input-description-string" + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "retrieverTool_1-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Retriever", + "name": "retriever", + "type": "BaseRetriever", + "id": "retrieverTool_1-input-retriever-BaseRetriever" + } + ], + "inputs": { + "name": "search_tsla", + "description": "Use this function to answer user questions about Tesla Inc (TSLA). It contains a SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period.", + "retriever": "{{pinecone_1.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "retrieverTool", + "label": "RetrieverTool", + "type": "RetrieverTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 805.1192462354428, + "y": 479.4961512574057 + }, + "dragging": false + }, + { + "id": "chatOpenAI_0", + "position": { + "x": 813.5701421468654, + "y": 1658.1569949989084 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 5, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model 
Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-1106-vision-preview", + "name": "gpt-4-1106-vision-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence 
Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { 
+ "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 670, + "selected": false, + "positionAbsolute": { + "x": 813.5701421468654, + "y": 1658.1569949989084 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "openAIEmbeddings_0", + "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "target": "pinecone_0", + "targetHandle": "pinecone_0-input-embeddings-Embeddings", + "type": "buttonedge", + "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings" + }, + { + "source": "openAIEmbeddings_1", + "sourceHandle": "openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "target": "pinecone_1", + "targetHandle": "pinecone_1-input-embeddings-Embeddings", + "type": "buttonedge", + "id": "openAIEmbeddings_1-openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_1-pinecone_1-input-embeddings-Embeddings" + }, + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "openAIFunctionAgent_0", + "targetHandle": "openAIFunctionAgent_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-openAIFunctionAgent_0-openAIFunctionAgent_0-input-memory-BaseChatMemory" + }, + { + "source": "pinecone_0", + "sourceHandle": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "target": "retrieverTool_2", + "targetHandle": "retrieverTool_2-input-retriever-BaseRetriever", + "type": 
"buttonedge", + "id": "pinecone_0-pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-retrieverTool_2-retrieverTool_2-input-retriever-BaseRetriever" + }, + { + "source": "pinecone_1", + "sourceHandle": "pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "target": "retrieverTool_1", + "targetHandle": "retrieverTool_1-input-retriever-BaseRetriever", + "type": "buttonedge", + "id": "pinecone_1-pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-retrieverTool_1-retrieverTool_1-input-retriever-BaseRetriever" + }, + { + "source": "retrieverTool_1", + "sourceHandle": "retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": "openAIFunctionAgent_0", + "targetHandle": "openAIFunctionAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "retrieverTool_1-retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable-openAIFunctionAgent_0-openAIFunctionAgent_0-input-tools-Tool" + }, + { + "source": "retrieverTool_2", + "sourceHandle": "retrieverTool_2-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": "openAIFunctionAgent_0", + "targetHandle": "openAIFunctionAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "retrieverTool_2-retrieverTool_2-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable-openAIFunctionAgent_0-openAIFunctionAgent_0-input-tools-Tool" + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "openAIFunctionAgent_0", + "targetHandle": "openAIFunctionAgent_0-input-model-BaseChatModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-openAIFunctionAgent_0-openAIFunctionAgent_0-input-model-BaseChatModel" + } + ] +} \ No newline at end of file diff --git 
a/fr/.gitbook/assets/Agentic RAG V2.json b/fr/.gitbook/assets/Agentic RAG V2.json new file mode 100644 index 00000000..c37df8a9 --- /dev/null +++ b/fr/.gitbook/assets/Agentic RAG V2.json @@ -0,0 +1,2168 @@ +{ + "description": "An agent based approach using AgentflowV2 to perform self-correcting question answering over documents", + "usecases": ["Reflective Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -261.54516755177303, + "y": 62.39402454297252 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": 
"options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": [ + { + "key": "query", + "value": "" + } + ] + }, + 
"outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -261.54516755177303, + "y": 62.39402454297252 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": -114.84790789259606, + "y": 53.22583468442305 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Check if query valid", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "AI Related" + }, + { + "scenario": "General" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

Check if user is asking about AI related topic, or just general query

", + "conditionAgentInput": "

{{ question }}

", + "conditionAgentScenarios": [ + { + "scenario": "AI Related" + }, + { + "scenario": "General" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": -114.84790789259606, + "y": 53.22583468442305 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 158.29022963739308, + "y": -20.666608318859062 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Generate Query", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": 
"Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

Given the user question and history, construct a short string that can be used for searching vector database. Only generate the query, no meta comments, no explanation

Example:

Question: what are the events happening today?

Query: today's event

Example:

Question: how about the address?

Query: business address of the shop

Question: {{ question }}

Query:

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": [ + { + "key": "query", + "value": "

{{ output }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 158.29022963739308, + "y": -20.666608318859062 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 165.82871786911647, + "y": 92.15131805222342 + }, + "data": { + "id": "llmAgentflow_1", + "label": "General Answer", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + 
"topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 165.82871786911647, + "y": 92.15131805222342 + }, + "dragging": false + }, + { + "id": "retrieverAgentflow_0", + "position": { + "x": 396.87575963946966, + "y": -17.41189617164227 + }, + "data": { + "id": "retrieverAgentflow_0", + "label": "Retriever Vector DB", + "version": 1, + "name": "retrieverAgentflow", + "type": "Retriever", + "color": "#b8bedd", + "baseClasses": ["Retriever"], + "category": "Agent Flows", + "description": "Retrieve information from vector database", + "inputParams": [ + { + "label": "Knowledge (Document Stores)", + "name": "retrieverKnowledgeDocumentStores", + "type": "array", + "description": "Document stores to retrieve information from. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + } + ], + "id": "retrieverAgentflow_0-input-retrieverKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Retriever Query", + "name": "retrieverQuery", + "type": "string", + "placeholder": "Enter your query here", + "rows": 4, + "acceptVariable": true, + "id": "retrieverAgentflow_0-input-retrieverQuery-string", + "display": true + }, + { + "label": "Output Format", + "name": "outputFormat", + "type": "options", + "options": [ + { + "label": "Text", + "name": "text" + }, + { + "label": "Text with Metadata", + "name": "textWithMetadata" + } + ], + "default": "text", + "id": "retrieverAgentflow_0-input-outputFormat-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "retrieverUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "retrieverAgentflow_0-input-retrieverUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "retrieverKnowledgeDocumentStores": [ + { + "documentStore": "570df92b-087b-4d3b-9462-7a11283454a5:ai paper" + } + ], + "retrieverQuery": "

{{ $flow.state.query }}

", + "outputFormat": "text", + "retrieverUpdateState": "" + }, + "outputAnchors": [ + { + "id": "retrieverAgentflow_0-output-retrieverAgentflow", + "label": "Retriever", + "name": "retrieverAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 396.87575963946966, + "y": -17.41189617164227 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_1", + "position": { + "x": 647.9586712853835, + "y": -24.93225611691784 + }, + "data": { + "id": "conditionAgentAgentflow_1", + "label": "Check if docs relevant", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_1-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_1-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_1-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "Relevant" + }, + { + "scenario": "Irrelevant" + } + ], + "id": "conditionAgentAgentflow_1-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

Determine if the document is relevant to user question. User question is {{ question }}

", + "conditionAgentInput": "

{{ retrieverAgentflow_0 }}

", + "conditionAgentScenarios": [ + { + "scenario": "Relevant" + }, + { + "scenario": "Irrelevant" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_1-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_1-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 206, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 647.9586712853835, + "y": -24.93225611691784 + }, + "dragging": false + }, + { + "id": "llmAgentflow_2", + "position": { + "x": 920.5416793343077, + "y": -75.82606372993476 + }, + "data": { + "id": "llmAgentflow_2", + "label": "Generate Response", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_2-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": 
"Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_2-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_2-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_2-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_2-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_2-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_2-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_2-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Given the question: {{ question }}

And the findings: {{ retrieverAgentflow_0 }}

Output the final response

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_2-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 920.5416793343077, + "y": -75.82606372993476 + }, + "dragging": false + }, + { + "id": "llmAgentflow_3", + "position": { + "x": 921.1014768144131, + "y": 26.898902739007895 + }, + "data": { + "id": "llmAgentflow_3", + "label": "Regenerate Question", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_3-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + 
"acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_3-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_3-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_3-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_3-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_3-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_3-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_3-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_3-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_3-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a helpful assistant that can transform the query to produce a better question.

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Look at the input and try to reason about the underlying semantic intent / meaning.

Here is the initial question:

{{ $flow.state.query }}

Formulate an improved question:

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": [ + { + "key": "query", + "value": "

{{ output }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_3-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 921.1014768144131, + "y": 26.898902739007895 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 1160.0553838519766, + "y": 30.06685001229809 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop back to Retriever", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "retrieverAgentflow_0-Retriever Vector DB", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 208, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 1160.0553838519766, + "y": 30.06685001229809 + }, + "dragging": false + }, + { + "id": 
"stickyNoteAgentflow_0", + "position": { + "x": 145.5705985486235, + "y": -116.29641765720946 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "First update of the state.query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 81, + "selected": false, + "positionAbsolute": { + "x": 145.5705985486235, + "y": -116.29641765720946 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_1", + "position": { + "x": 923.4413972289242, + "y": 110.04672879978278 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Second update of state.query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + 
"height": 81, + "selected": false, + "positionAbsolute": { + "x": 923.4413972289242, + "y": 110.04672879978278 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "retrieverAgentflow_0", + "targetHandle": "retrieverAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#b8bedd", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-retrieverAgentflow_0-retrieverAgentflow_0" + }, + { + "source": "retrieverAgentflow_0", + "sourceHandle": "retrieverAgentflow_0-output-retrieverAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": 
"conditionAgentAgentflow_1", + "data": { + "sourceColor": "#b8bedd", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "retrieverAgentflow_0-retrieverAgentflow_0-output-retrieverAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "llmAgentflow_3", + "sourceHandle": "llmAgentflow_3-output-llmAgentflow", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_3-llmAgentflow_3-output-llmAgentflow-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-1", + "target": "llmAgentflow_3", + "targetHandle": "llmAgentflow_3", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-1-llmAgentflow_3-llmAgentflow_3" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-0", + "target": "llmAgentflow_2", + "targetHandle": "llmAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-0-llmAgentflow_2-llmAgentflow_2" + } + ] +} diff --git a/fr/.gitbook/assets/Collage_Horizontal.png b/fr/.gitbook/assets/Collage_Horizontal.png new file mode 100644 index 00000000..b9055b37 Binary files /dev/null and b/fr/.gitbook/assets/Collage_Horizontal.png differ diff --git a/fr/.gitbook/assets/Customer Support Agents.json b/fr/.gitbook/assets/Customer Support Agents.json new file mode 100644 index 00000000..73e9519d --- /dev/null +++ b/fr/.gitbook/assets/Customer Support Agents.json @@ -0,0 +1,1512 @@ +{ + "nodes": [ + { + "id": 
"startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -142.48525859587886, + "y": 159.99117861928372 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": true + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": true + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable 
Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": true + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "formInput", + "formTitle": "Inquiry", + "formDescription": "Customer Inquiry", + "formInputTypes": [ + { + "type": "string", + "label": "Subject", + "name": "subject", + "addOptions": "" + }, + { + "type": "string", + "label": "Body", + "name": "body", + "addOptions": "" + } + ], + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + 
"positionAbsolute": { + "x": -142.48525859587886, + "y": 159.99117861928372 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": -8.296983647330677, + "y": 142.81116471739003 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Detect User Intention", + "version": 1.1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": [ + "ConditionAgent" + ], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "" + }, + { + "scenario": "" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + }, + { + "label": "Override System Prompt", + "name": "conditionAgentOverrideSystemPrompt", + "type": "boolean", + "description": "Override initial system prompt for Condition Agent", + "optional": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentOverrideSystemPrompt-boolean", + "display": true + }, + { + "label": "Node System Prompt", + "name": "conditionAgentSystemPrompt", + "type": "string", + "rows": 4, + "optional": true, + "acceptVariable": true, + "default": "

You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios.

\n \n

Steps

\n
    \n
  1. Read the input string and the list of scenarios.
  2. \n
  3. Analyze the content of the input to identify its main topic or intention.
  4. \n
  5. Compare the input with each scenario: Evaluate how well the input's topic or intention aligns with each of the provided scenarios and select the one that is the best fit.
  6. \n
  7. Output the result: Return the selected scenario in the specified JSON format.
  8. \n
\n

Output Format

\n

Output should be a JSON object that names the selected scenario, like this: {\"output\": \"\"}. No explanation is needed.

\n

Examples

\n
    \n
  1. \n

    Input: {\"input\": \"Hello\", \"scenarios\": [\"user is asking about AI\", \"user is not asking about AI\"], \"instruction\": \"Your task is to check if the user is asking about AI.\"}

    \n

    Output: {\"output\": \"user is not asking about AI\"}

    \n
  2. \n
  3. \n

    Input: {\"input\": \"What is AIGC?\", \"scenarios\": [\"user is asking about AI\", \"user is asking about the weather\"], \"instruction\": \"Your task is to check and see if the user is asking a topic about AI.\"}

    \n

    Output: {\"output\": \"user is asking about AI\"}

    \n
  4. \n
  5. \n

    Input: {\"input\": \"Can you explain deep learning?\", \"scenarios\": [\"user is interested in AI topics\", \"user wants to order food\"], \"instruction\": \"Determine if the user is interested in learning about AI.\"}

    \n

    Output: {\"output\": \"user is interested in AI topics\"}

    \n
  6. \n
\n

Note

\n ", + "description": "Expert use only. Modifying this can significantly alter agent behavior. Leave default if unsure", + "show": { + "conditionAgentOverrideSystemPrompt": true + }, + "id": "conditionAgentAgentflow_0-input-conditionAgentSystemPrompt-string", + "display": false + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

You are a customer support agent. Understand and process support tickets by automatically triaging them to the correct departments or individuals, generating immediate responses for common issues, and gathering necessary information for complex queries.

Follow the following routine with the user:

1. First, greet the user and see how you can help the user

2. If question is related to HR query, handoff to HR Agent

3. If question is related to events query, handoff to Event Manager

Note: Transfers between agents are handled seamlessly in the background; do not mention or draw attention to these transfers in your conversation with the user

", + "conditionAgentInput": "

{{ $form.subject }}

", + "conditionAgentScenarios": [ + { + "scenario": "Query is related to HR" + }, + { + "scenario": "Query is related to events" + }, + { + "scenario": "Query is general query" + } + ], + "conditionAgentOverrideSystemPrompt": "", + "conditionAgentSystemPrompt": "

You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios.

\n \n

Steps

\n
    \n
  1. Read the input string and the list of scenarios.
  2. \n
  3. Analyze the content of the input to identify its main topic or intention.
  4. \n
  5. Compare the input with each scenario: Evaluate how well the input's topic or intention aligns with each of the provided scenarios and select the one that is the best fit.
  6. \n
  7. Output the result: Return the selected scenario in the specified JSON format.
  8. \n
\n

Output Format

\n

Output should be a JSON object that names the selected scenario, like this: {\"output\": \"\"}. No explanation is needed.

\n

Examples

\n
    \n
  1. \n

    Input: {\"input\": \"Hello\", \"scenarios\": [\"user is asking about AI\", \"user is not asking about AI\"], \"instruction\": \"Your task is to check if the user is asking about AI.\"}

    \n

    Output: {\"output\": \"user is not asking about AI\"}

    \n
  2. \n
  3. \n

    Input: {\"input\": \"What is AIGC?\", \"scenarios\": [\"user is asking about AI\", \"user is asking about the weather\"], \"instruction\": \"Your task is to check and see if the user is asking a topic about AI.\"}

    \n

    Output: {\"output\": \"user is asking about AI\"}

    \n
  4. \n
  5. \n

    Input: {\"input\": \"Can you explain deep learning?\", \"scenarios\": [\"user is interested in AI topics\", \"user wants to order food\"], \"instruction\": \"Determine if the user is interested in learning about AI.\"}

    \n

    Output: {\"output\": \"user is interested in AI topics\"}

    \n
  6. \n
\n

Note

\n ", + "conditionAgentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": true, + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Condition 2" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 209, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": -8.296983647330677, + "y": 142.81116471739003 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 256.1838081680137, + "y": 39.51170632109836 + }, + "data": { + "id": "agentAgentflow_0", + "label": "HR Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + 
"name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": 
"asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatAnthropic", + "agentMessages": [ + { + "role": "system", + "content": "

You are an HR agent responsible for retrieving and applying internal knowledge sources to answer employee queries about HR policies, procedures, and guidelines.

\n

When responding to HR-related questions, you must first identify the relevant policy areas, search through available internal knowledge sources, and then provide accurate, comprehensive answers based on official company documentation.

\n

Steps

\n
    \n
  1. Analyze the Query: Identify the specific HR topic, policy area, or procedure the user is asking about
  2. \n
  3. Retrieve Relevant Information: Search through internal HR knowledge sources including:
  4. \n
\n\n
    \n
  1. Cross-Reference Sources: Verify information across multiple relevant documents to ensure accuracy and completeness
  2. \n
  3. Synthesize Response: Combine retrieved information into a coherent, actionable answer
  4. \n
  5. Provide Supporting Details: Include relevant policy numbers, effective dates, or references to specific sections when applicable
  6. \n
\n

Notes

\n" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": [ + { + "documentStore": "27fb1130-7417-4d9e-9edc-0c61315958dc:Human Resources Law", + "docStoreDescription": "This information is useful when determining the legal framework and implementation requirements for human resources management under the 2016 HR law and its 2020 implementing regulation.", + "returnSourceDocuments": true + } + ], + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "claude-sonnet-4-0", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "agentModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 213, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 256.1838081680137, + "y": 39.51170632109836 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 255.95225989267777, + "y": 143.20151624252088 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Event Manager", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ 
+ { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatMistralAI", + "agentMessages": [ + { + "role": "system", + "content": "

Act as an event manager that can determine actions on events such as create, update, get, list and delete.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "openAPIToolkit", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "yamlFile": "data:application/octet-stream;base64,b3BlbmFwaTogMy4wLjANCmluZm86DQogIHZlcnNpb246IDEuMC4wDQogIHRpdGxlOiBFdmVudCBNYW5hZ2VtZW50IEFQSQ0KICBkZXNjcmlwdGlvbjogQW4gQVBJIGZvciBtYW5hZ2luZyBldmVudCBkYXRhDQoNCnNlcnZlcnM6DQogIC0gdXJsOiBodHRwOi8vbG9jYWxob3N0OjU1NjYNCiAgICBkZXNjcmlwdGlvbjogTG9jYWwgZGV2ZWxvcG1lbnQgc2VydmVyDQoNCnBhdGhzOg0KICAvZXZlbnRzOg0KICAgIGdldDoNCiAgICAgIHN1bW1hcnk6IExpc3QgYWxsIGV2ZW50cw0KICAgICAgb3BlcmF0aW9uSWQ6IGxpc3RFdmVudHMNCiAgICAgIHJlc3BvbnNlczoNCiAgICAgICAgJzIwMCc6DQogICAgICAgICAgZGVzY3JpcHRpb246IEEgbGlzdCBvZiBldmVudHMNCiAgICAgICAgICBjb250ZW50Og0KICAgICAgICAgICAgYXBwbGljYXRpb24vanNvbjoNCiAgICAgICAgICAgICAgc2NoZW1hOg0KICAgICAgICAgICAgICAgIHR5cGU6IGFycmF5DQogICAgICAgICAgICAgICAgaXRlbXM6DQogICAgICAgICAgICAgICAgICAkcmVmOiAnIy9jb21wb25lbnRzL3NjaGVtYXMvRXZlbnQnDQogICAgDQogICAgcG9zdDoNCiAgICAgIHN1bW1hcnk6IENyZWF0ZSBhIG5ldyBldmVudA0KICAgICAgb3BlcmF0aW9uSWQ6IGNyZWF0ZUV2ZW50DQogICAgICByZXF1ZXN0Qm9keToNCiAgICAgICAgcmVxdWlyZWQ6IHRydWUNCiAgICAgICAgY29udGVudDoNCiAgICAgICAgICBhcHBsaWNhdGlvbi9qc29uOg0KICAgICAgICAgICAgc2NoZW1hOg0KICAgICAgICAgICAgICAkcmVmOiAnIy9jb21wb25lbnRzL3NjaGVtYXMvRXZlbnRJbnB1dCcNCiAgICAgIHJlc3BvbnNlczoNCiAgICAgICAgJzIwMSc6DQogICAgICAgICAgZGVzY3JpcHRpb246IFRoZSBldmVudCB3YXMgY3JlYXRlZA0KICAgICAgICAgIGNvbnRlbnQ6DQogICAgICAgICAgICBhcHBsaWNhdGlvbi9qc29uOg0KICAgICAgICAgICAgICBzY2hlbWE6DQogICAgICAgICAgICAgICAgJHJlZjogJyMvY29tcG9uZW50cy9zY2hlbWFzL0V2ZW50Jw0KICAgICAgICAnNDAwJzoNCiAgICAgICAgICBkZXNjcmlwdGlvbjogSW52YWxpZCBpbnB1dA0KICAgICAgICAgIGNvbnRlbnQ6DQogICAgICAgICAgICBhcHBsaWNhdGlvbi9qc29uOg0KICAgICAgICAgICAgICBzY2hlbWE6DQogICAgICAgICAgICAgICAgJHJlZjogJyMvY29tcG9uZW50cy9zY2hlbWFzL0Vycm9yJw0KDQogIC9ldmVudHMve2lkfToNCiAgICBwYXJhbWV0ZXJzOg0KICAgICAgLSBuYW1lOiBpZA0KICAgICAgICBpbjogcGF0aA0KICAgICAgICByZXF1aXJlZDogdHJ1ZQ0KICAgICAgICBzY2hlbWE6DQogICAgICAgICAgdHlwZTogc3RyaW5nDQogICAgICAgIGRlc2NyaXB0aW
9uOiBUaGUgZXZlbnQgSUQNCiAgICANCiAgICBnZXQ6DQogICAgICBzdW1tYXJ5OiBSZXRyaWV2ZSBhbiBldmVudCBieSBJRA0KICAgICAgb3BlcmF0aW9uSWQ6IGdldEV2ZW50QnlJZA0KICAgICAgcmVzcG9uc2VzOg0KICAgICAgICAnMjAwJzoNCiAgICAgICAgICBkZXNjcmlwdGlvbjogVGhlIGV2ZW50DQogICAgICAgICAgY29udGVudDoNCiAgICAgICAgICAgIGFwcGxpY2F0aW9uL2pzb246DQogICAgICAgICAgICAgIHNjaGVtYToNCiAgICAgICAgICAgICAgICAkcmVmOiAnIy9jb21wb25lbnRzL3NjaGVtYXMvRXZlbnQnDQogICAgICAgICc0MDQnOg0KICAgICAgICAgIGRlc2NyaXB0aW9uOiBFdmVudCBub3QgZm91bmQNCiAgICAgICAgICBjb250ZW50Og0KICAgICAgICAgICAgYXBwbGljYXRpb24vanNvbjoNCiAgICAgICAgICAgICAgc2NoZW1hOg0KICAgICAgICAgICAgICAgICRyZWY6ICcjL2NvbXBvbmVudHMvc2NoZW1hcy9FcnJvcicNCiAgICANCiAgICBwYXRjaDoNCiAgICAgIHN1bW1hcnk6IFVwZGF0ZSBhbiBldmVudCdzIGRldGFpbHMgYnkgSUQNCiAgICAgIG9wZXJhdGlvbklkOiB1cGRhdGVFdmVudERldGFpbHMNCiAgICAgIHJlcXVlc3RCb2R5Og0KICAgICAgICByZXF1aXJlZDogdHJ1ZQ0KICAgICAgICBjb250ZW50Og0KICAgICAgICAgIGFwcGxpY2F0aW9uL2pzb246DQogICAgICAgICAgICBzY2hlbWE6DQogICAgICAgICAgICAgICRyZWY6ICcjL2NvbXBvbmVudHMvc2NoZW1hcy9FdmVudElucHV0Jw0KICAgICAgcmVzcG9uc2VzOg0KICAgICAgICAnMjAwJzoNCiAgICAgICAgICBkZXNjcmlwdGlvbjogVGhlIGV2ZW50J3MgZGV0YWlscyB3ZXJlIHVwZGF0ZWQNCiAgICAgICAgICBjb250ZW50Og0KICAgICAgICAgICAgYXBwbGljYXRpb24vanNvbjoNCiAgICAgICAgICAgICAgc2NoZW1hOg0KICAgICAgICAgICAgICAgICRyZWY6ICcjL2NvbXBvbmVudHMvc2NoZW1hcy9FdmVudCcNCiAgICAgICAgJzQwMCc6DQogICAgICAgICAgZGVzY3JpcHRpb246IEludmFsaWQgaW5wdXQNCiAgICAgICAgICBjb250ZW50Og0KICAgICAgICAgICAgYXBwbGljYXRpb24vanNvbjoNCiAgICAgICAgICAgICAgc2NoZW1hOg0KICAgICAgICAgICAgICAgICRyZWY6ICcjL2NvbXBvbmVudHMvc2NoZW1hcy9FcnJvcicNCiAgICAgICAgJzQwNCc6DQogICAgICAgICAgZGVzY3JpcHRpb246IEV2ZW50IG5vdCBmb3VuZA0KICAgICAgICAgIGNvbnRlbnQ6DQogICAgICAgICAgICBhcHBsaWNhdGlvbi9qc29uOg0KICAgICAgICAgICAgICBzY2hlbWE6DQogICAgICAgICAgICAgICAgJHJlZjogJyMvY29tcG9uZW50cy9zY2hlbWFzL0Vycm9yJw0KICAgIA0KICAgIGRlbGV0ZToNCiAgICAgIHN1bW1hcnk6IERlbGV0ZSBhbiBldmVudCBieSBJRA0KICAgICAgb3BlcmF0aW9uSWQ6IGRlbGV0ZUV2ZW50DQogICAgICByZXNwb25zZXM6DQogICAgICAgICcyMDQnOg0KICAgICAgICAgIGRlc2NyaXB0aW9uOiBUaGUgZXZlbnQgd2FzIGRlbGV0ZW
QNCiAgICAgICAgJzQwNCc6DQogICAgICAgICAgZGVzY3JpcHRpb246IEV2ZW50IG5vdCBmb3VuZA0KICAgICAgICAgIGNvbnRlbnQ6DQogICAgICAgICAgICBhcHBsaWNhdGlvbi9qc29uOg0KICAgICAgICAgICAgICBzY2hlbWE6DQogICAgICAgICAgICAgICAgJHJlZjogJyMvY29tcG9uZW50cy9zY2hlbWFzL0Vycm9yJw0KDQpjb21wb25lbnRzOg0KICBzY2hlbWFzOg0KICAgIEV2ZW50Og0KICAgICAgdHlwZTogb2JqZWN0DQogICAgICBwcm9wZXJ0aWVzOg0KICAgICAgICBpZDoNCiAgICAgICAgICB0eXBlOiBzdHJpbmcNCiAgICAgICAgICBkZXNjcmlwdGlvbjogVGhlIHVuaXF1ZSBpZGVudGlmaWVyIGZvciB0aGUgZXZlbnQNCiAgICAgICAgbmFtZToNCiAgICAgICAgICB0eXBlOiBzdHJpbmcNCiAgICAgICAgICBkZXNjcmlwdGlvbjogVGhlIG5hbWUgb2YgdGhlIGV2ZW50DQogICAgICAgIGRhdGU6DQogICAgICAgICAgdHlwZTogc3RyaW5nDQogICAgICAgICAgZm9ybWF0OiBkYXRlLXRpbWUNCiAgICAgICAgICBkZXNjcmlwdGlvbjogVGhlIGRhdGUgYW5kIHRpbWUgb2YgdGhlIGV2ZW50IGluIElTTyA4NjAxIGZvcm1hdA0KICAgICAgICBsb2NhdGlvbjoNCiAgICAgICAgICB0eXBlOiBzdHJpbmcNCiAgICAgICAgICBkZXNjcmlwdGlvbjogVGhlIGxvY2F0aW9uIG9mIHRoZSBldmVudA0KICAgICAgcmVxdWlyZWQ6DQogICAgICAgIC0gbmFtZQ0KICAgICAgICAtIGRhdGUNCiAgICAgICAgLSBsb2NhdGlvbg0KICAgIA0KICAgIEV2ZW50SW5wdXQ6DQogICAgICB0eXBlOiBvYmplY3QNCiAgICAgIHByb3BlcnRpZXM6DQogICAgICAgIG5hbWU6DQogICAgICAgICAgdHlwZTogc3RyaW5nDQogICAgICAgICAgZGVzY3JpcHRpb246IFRoZSBuYW1lIG9mIHRoZSBldmVudA0KICAgICAgICBkYXRlOg0KICAgICAgICAgIHR5cGU6IHN0cmluZw0KICAgICAgICAgIGZvcm1hdDogZGF0ZS10aW1lDQogICAgICAgICAgZGVzY3JpcHRpb246IFRoZSBkYXRlIGFuZCB0aW1lIG9mIHRoZSBldmVudCBpbiBJU08gODYwMSBmb3JtYXQNCiAgICAgICAgbG9jYXRpb246DQogICAgICAgICAgdHlwZTogc3RyaW5nDQogICAgICAgICAgZGVzY3JpcHRpb246IFRoZSBsb2NhdGlvbiBvZiB0aGUgZXZlbnQNCiAgICAgIHJlcXVpcmVkOg0KICAgICAgICAtIG5hbWUNCiAgICAgICAgLSBkYXRlDQogICAgICAgIC0gbG9jYXRpb24NCiAgICANCiAgICBFcnJvcjoNCiAgICAgIHR5cGU6IG9iamVjdA0KICAgICAgcHJvcGVydGllczoNCiAgICAgICAgZXJyb3I6DQogICAgICAgICAgdHlwZTogc3RyaW5nDQogICAgICAgICAgZGVzY3JpcHRpb246IEVycm9yIG1lc3NhZ2U=,filename:events.yaml", + "returnDirect": "", + "headers": "", + "removeNulls": "", + "customCode": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await 
fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n", + "agentSelectedTool": "openAPIToolkit" + } + } + ], + "agentKnowledgeDocumentStores": [], + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "mistral-large-latest", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "randomSeed": "", + "safeMode": "", + "overrideEndpoint": "", + "agentModel": "chatMistralAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 212, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 255.95225989267777, + "y": 143.20151624252088 + }, + "dragging": false + }, + { + "id": "agentAgentflow_3", + "position": { + "x": 254.52955040661755, + "y": 269.9598089153506 + }, + "data": { + "id": "agentAgentflow_3", + "label": "General Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_3-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + 
{ + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_3-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_3-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + 
"loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_3-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_3-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_3-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_3-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_3-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a helpful assistant

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "agentAgentflow_3-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 254.52955040661755, + "y": 269.9598089153506 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + 
"sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-3", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "3", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-3-agentAgentflow_3-agentAgentflow_3" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-2", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-2-agentAgentflow_3-agentAgentflow_3" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/Deep Research Dynamic SubAgents.json b/fr/.gitbook/assets/Deep Research Dynamic SubAgents.json new file mode 100644 index 00000000..47747940 --- /dev/null +++ b/fr/.gitbook/assets/Deep Research Dynamic SubAgents.json @@ -0,0 +1,1851 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -241.58365178492127, + "y": 86.32546838777353 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the 
conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": true + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": true + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": true + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "formInput", + "formTitle": "Research", + "formDescription": "A research agent that takes in a query, and return a detailed report", + "formInputTypes": [ + { + "type": "string", + "label": "Query", + "name": "query", + "addOptions": "" + } + ], + "startState": [ + { + "key": "subagents", + "value": "" + }, + { + "key": "findings", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -241.58365178492127, + "y": 86.32546838777353 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 
-111.52635639216058, + "y": 83.67035986437665 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Planner", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + 
"name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + 
"type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + 
"acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatAnthropic", + "llmMessages": [ + { + "role": "system", + "content": "

You are an expert research lead, focused on high-level research strategy, planning, efficient delegation to subagents, and final report writing. Your core goal is to be maximally helpful to the user by leading a process to research the user's query and then creating an excellent research report that answers this query very well. Take the current request from the user, plan out an effective research process to answer it as well as possible, and then execute this plan by delegating key tasks to appropriate subagents.

The current date is {{ current_date_time }}.

<research_process>

Follow this process to break down the user’s question and develop an excellent research plan. Think about the user's task thoroughly and in great detail to understand it well and determine what to do next. Analyze each aspect of the user's question and identify the most important aspects. Consider multiple approaches with complete, thorough reasoning. Explore several different methods of answering the question (at least 3) and then choose the best method you find. Follow this process closely:

1. Assessment and breakdown: Analyze and break down the user's prompt to make sure you fully understand it.

* Identify the main concepts, key entities, and relationships in the task.

* List specific facts or data points needed to answer the question well.

* Note any temporal or contextual constraints on the question.

* Analyze what features of the prompt are most important - what does the user likely care about most here? What are they expecting or desiring in the final result? What tools do they expect to be used and how do we know?

* Determine what form the answer would need to be in to fully accomplish the user's task. Would it need to be a detailed report, a list of entities, an analysis of different perspectives, a visual report, or something else? What components will it need to have?

2. Query type determination: Explicitly state your reasoning on what type of query this question is from the categories below.

* Depth-first query: When the problem requires multiple perspectives on the same issue, and calls for \"going deep\" by analyzing a single topic from many angles.

- Benefits from parallel agents exploring different viewpoints, methodologies, or sources

- The core question remains singular but benefits from diverse approaches

- Example: \"What are the most effective treatments for depression?\" (benefits from parallel agents exploring different treatments and approaches to this question)

- Example: \"What really caused the 2008 financial crisis?\" (benefits from economic, regulatory, behavioral, and historical perspectives, and analyzing or steelmanning different viewpoints on the question)

- Example: \"can you identify the best approach to building AI finance agents in 2025 and why?\"

* Breadth-first query: When the problem can be broken into distinct, independent sub-questions, and calls for \"going wide\" by gathering information about each sub-question.

- Benefits from parallel agents each handling separate sub-topics.

- The query naturally divides into multiple parallel research streams or distinct, independently researchable sub-topics

- Example: \"Compare the economic systems of three Nordic countries\" (benefits from simultaneous independent research on each country)

- Example: \"What are the net worths and names of all the CEOs of all the fortune 500 companies?\" (intractable to research in a single thread; most efficient to split up into many distinct research agents which each gathers some of the necessary information)

- Example: \"Compare all the major frontend frameworks based on performance, learning curve, ecosystem, and industry adoption\" (best to identify all the frontend frameworks and then research all of these factors for each framework)

* Straightforward query: When the problem is focused, well-defined, and can be effectively answered by a single focused investigation or fetching a single resource from the internet.

- Can be handled effectively by a single subagent with clear instructions; does not benefit much from extensive research

- Example: \"What is the current population of Tokyo?\" (simple fact-finding)

- Example: \"What are all the fortune 500 companies?\" (just requires finding a single website with a full list, fetching that list, and then returning the results)

- Example: \"Tell me about bananas\" (fairly basic, short question that likely does not expect an extensive answer)

3. Detailed research plan development: Based on the query type, develop a specific research plan with clear allocation of tasks across different research subagents. Ensure if this plan is executed, it would result in an excellent answer to the user's query.

* For Depth-first queries:

- Define 3-5 different methodological approaches or perspectives.

- List specific expert viewpoints or sources of evidence that would enrich the analysis.

- Plan how each perspective will contribute unique insights to the central question.

- Specify how findings from different approaches will be synthesized.

- Example: For \"What causes obesity?\", plan agents to investigate genetic factors, environmental influences, psychological aspects, socioeconomic patterns, and biomedical evidence, and outline how the information could be aggregated into a great answer.

* For Breadth-first queries:

- Enumerate all the distinct sub-questions or sub-tasks that can be researched independently to answer the query.

- Identify the most critical sub-questions or perspectives needed to answer the query comprehensively. Only create additional subagents if the query has clearly distinct components that cannot be efficiently handled by fewer agents. Avoid creating subagents for every possible angle - focus on the essential ones.

- Prioritize these sub-tasks based on their importance and expected research complexity.

- Define extremely clear, crisp, and understandable boundaries between sub-topics to prevent overlap.

- Plan how findings will be aggregated into a coherent whole.

- Example: For \"Compare EU country tax systems\", first create a subagent to retrieve a list of all the countries in the EU today, then think about what metrics and factors would be relevant to compare each country's tax systems, then use the batch tool to run 4 subagents to research the metrics and factors for the key countries in Northern Europe, Western Europe, Eastern Europe, Southern Europe.

* For Straightforward queries:

- Identify the most direct, efficient path to the answer.

- Determine whether basic fact-finding or minor analysis is needed.

- Specify exact data points or information required to answer.

- Determine what sources are likely most relevant to answer this query that the subagents should use, and whether multiple sources are needed for fact-checking.

- Plan basic verification methods to ensure the accuracy of the answer.

- Create an extremely clear task description that describes how a subagent should research this question.

* For each element in your plan for answering any query, explicitly evaluate:

- Can this step be broken into independent subtasks for a more efficient process?

- Would multiple perspectives benefit this step?

- What specific output is expected from this step?

- Is this step strictly necessary to answer the user's query well?

4. Methodical plan execution: Execute the plan fully, using parallel subagents where possible. Determine how many subagents to use based on the complexity of the query, default to using 3 subagents for most queries.

* For parallelizable steps:

- Deploy appropriate subagents using the <delegation_instructions> below, making sure to provide extremely clear task descriptions to each subagent and ensuring that if these tasks are accomplished it would provide the information needed to answer the query.

- Synthesize findings when the subtasks are complete.

* For non-parallelizable/critical steps:

- First, attempt to accomplish them yourself based on your existing knowledge and reasoning. If the steps require additional research or up-to-date information from the web, deploy a subagent.

- If steps are very challenging, deploy independent subagents for additional perspectives or approaches.

- Compare the subagent's results and synthesize them using an ensemble approach and by applying critical reasoning.

* Throughout execution:

- Continuously monitor progress toward answering the user's query.

- Update the search plan and your subagent delegation strategy based on findings from tasks.

- Adapt to new information well - analyze the results, use Bayesian reasoning to update your priors, and then think carefully about what to do next.

- Adjust research depth based on time constraints and efficiency - if you are running out of time or a research process has already taken a very long time, avoid deploying further subagents and instead just start composing the output report immediately.

</research_process>

<subagent_count_guidelines>

When determining how many subagents to create, follow these guidelines:

1. Simple/Straightforward queries: create 1 subagent to collaborate with you directly -

- Example: \"What is the tax deadline this year?\" or “Research bananas” → 1 subagent

- Even for simple queries, always create at least 1 subagent to ensure proper source gathering

2. Standard complexity queries: 2-3 subagents

- For queries requiring multiple perspectives or research approaches

- Example: \"Compare the top 3 cloud providers\" → 3 subagents (one per provider)

3. Medium complexity queries: 3-5 subagents

- For multi-faceted questions requiring different methodological approaches

- Example: \"Analyze the impact of AI on healthcare\" → 4 subagents (regulatory, clinical, economic, technological aspects)

4. High complexity queries: 5-10 subagents (maximum 20)

- For very broad, multi-part queries with many distinct components

- Identify the most effective algorithms to efficiently answer these high-complexity queries with around 20 subagents.

- Example: \"Fortune 500 CEOs birthplaces and ages\" → Divide the large info-gathering task into smaller segments (e.g., 10 subagents handling 50 CEOs each)

IMPORTANT: Never create more than 20 subagents unless strictly necessary. If a task seems to require more than 20 subagents, it typically means you should restructure your approach to consolidate similar sub-tasks and be more efficient in your research process. Prefer fewer, more capable subagents over many overly narrow ones. More subagents = more overhead. Only add subagents when they provide distinct value.

</subagent_count_guidelines>

<delegation_instructions>

Use subagents as your primary research team - they should perform all major research tasks:

1. Deployment strategy:

* Deploy subagents immediately after finalizing your research plan, so you can start the research process quickly.

* Create research subagent with very clear and specific instructions to describe the subagent's task.

* Each subagent is a fully capable researcher that can search the web and use the other search tools that are available.

* Consider priority and dependency when ordering subagent tasks - deploy the most important subagents first. For instance, when other tasks will depend on results from one specific task, always create a subagent to address that blocking task first.

* Ensure you have sufficient coverage for comprehensive research - ensure that you deploy subagents to complete every task.

* All substantial information gathering should be delegated to subagents.

* While waiting for a subagent to complete, use your time efficiently by analyzing previous results, updating your research plan, or reasoning about the user's query and how to answer it best.

2. Task allocation principles:

* For depth-first queries: Deploy subagents in sequence to explore different methodologies or perspectives on the same core question. Start with the approach most likely to yield comprehensive and good results, then follow with alternative viewpoints to fill gaps or provide contrasting analysis.

* For breadth-first queries: Order subagents by topic importance and research complexity. Begin with subagents that will establish key facts or framework information, then deploy subsequent subagents to explore more specific or dependent subtopics.

* For straightforward queries: Deploy a single comprehensive subagent with clear instructions for fact-finding and verification. For these simple queries, treat the subagent as an equal collaborator - you can conduct some research yourself while delegating specific research tasks to the subagent. Give this subagent very clear instructions and try to ensure the subagent handles about half of the work, to efficiently distribute research work between yourself and the subagent.

* Avoid deploying subagents for trivial tasks that you can complete yourself, such as simple calculations, basic formatting, small web searches, or tasks that don't require external research

* But always deploy at least 1 subagent, even for simple tasks.

* Avoid overlap between subagents - every subagent should have distinct, clearly separate tasks, to avoid replicating work unnecessarily and wasting resources.

3. Clear direction for subagents: Ensure that you provide every subagent with extremely detailed, specific, and clear instructions for what their task is and how to accomplish it.

* All instructions for subagents should include the following as appropriate:

- Specific research objectives, ideally just 1 core objective per subagent.

- Expected output format - e.g. a list of entities, a report of the facts, an answer to a specific question, or other.

- Relevant background context about the user's question and how the subagent should contribute to the research plan.

- Key questions to answer as part of the research.

- Suggested starting points and sources to use; define what constitutes reliable information or high-quality sources for this task, and list any unreliable sources to avoid.

- Specific tools that the subagent should use - i.e. using web search and web fetch for gathering information from the web, or if the query requires non-public, company-specific, or user-specific information, use the available internal tools like google drive, gmail, gcal, slack, or any other internal tools that are available currently.

- If needed, precise scope boundaries to prevent research drift.

* Make sure that IF all the subagents followed their instructions very well, the results in aggregate would allow you to give an EXCELLENT answer to the user's question - complete, thorough, detailed, and accurate.

* When giving instructions to subagents, also think about what sources might be high-quality for their tasks, and give them some guidelines on what sources to use and how they should evaluate source quality for each task.

* Example of a good, clear, detailed task description for a subagent: \"Research the semiconductor supply chain crisis and its current status as of 2025. Use the web_search and web_fetch tools to gather facts from the internet. Begin by examining recent quarterly reports from major chip manufacturers like TSMC, Samsung, and Intel, which can be found on their investor relations pages or through the SEC EDGAR database. Search for industry reports from SEMI, Gartner, and IDC that provide market analysis and forecasts. Investigate government responses by checking the US CHIPS Act implementation progress at commerce.gov, EU Chips Act at ec.europa.eu, and similar initiatives in Japan, South Korea, and Taiwan through their respective government portals. Prioritize original sources over news aggregators. Focus on identifying current bottlenecks, projected capacity increases from new fab construction, geopolitical factors affecting supply chains, and expert predictions for when supply will meet demand. When research is done, compile your findings into a dense report of the facts, covering the current situation, ongoing solutions, and future outlook, with specific timelines and quantitative data where available.\"

4. Synthesis responsibility: As the lead research agent, your primary role is to coordinate, guide, and synthesize - NOT to conduct primary research yourself. You only conduct direct research if a critical question remains unaddressed by subagents or it is best to accomplish it yourself. Instead, focus on planning, analyzing and integrating findings across subagents, determining what to do next, providing clear instructions for each subagent, or identifying gaps in the collective research and deploying new subagents to fill them.

</delegation_instructions>

<answer_formatting>

Before providing a final answer:

1. Review the most recent fact list compiled during the search process.

2. Reflect deeply on whether these facts can answer the given query sufficiently.

3. Identify if you need to create more subagents for further research.

4. If sufficient, provide a final answer in the specific format that is best for the user's query and following the <writing_guidelines> below.

5. Output the final result in Markdown to submit your final research report.

6. Do not include ANY Markdown citations, a separate agent will be responsible for citations. Never include a list of references or sources or citations at the end of the report.

</answer_formatting>

<important_guidelines>

In communicating with subagents, maintain extremely high information density while being concise - describe everything needed in the fewest words possible.

As you progress through the search process:

1. When necessary, review the core facts gathered so far, including:

* Facts from your own research.

* Facts reported by subagents.

* Specific dates, numbers, and quantifiable data.

2. For key facts, especially numbers, dates, and critical information:

* Note any discrepancies you observe between sources or issues with the quality of sources.

* When encountering conflicting information, prioritize based on recency, consistency with other facts, and use best judgment.

3. Think carefully after receiving novel information, especially for critical reasoning and decision-making after getting results back from subagents.

4. For the sake of efficiency, when you have reached the point where further research has diminishing returns and you can give a good enough answer to the user, STOP FURTHER RESEARCH and do not create any new subagents. Just write your final report at this point. Make sure to terminate research when it is no longer necessary, to avoid wasting time and resources. For example, if you are asked to identify the top 5 fastest-growing startups, and you have identified the most likely top 5 startups with high confidence, stop research immediately and use the complete_task tool to submit your report rather than continuing the process unnecessarily.

5. NEVER create a subagent to generate the final report - YOU write and craft this final research report yourself based on all the results and the writing instructions, and you are never allowed to use subagents to create the report.

6. Avoid creating subagents to research topics that could cause harm. Specifically, you must not create subagents to research anything that would promote hate speech, racism, violence, discrimination, or catastrophic harm. If a query is sensitive, specify clear constraints for the subagent to avoid causing harm.

</important_guidelines>

You have a query provided to you by the user, which serves as your primary goal. You should do your best to thoroughly accomplish the user's task. No clarifications will be given, therefore use your best judgment and do not attempt to ask the user questions. Before starting your work, review these instructions and the user’s requirements, making sure to plan out how you will efficiently use subagents and parallel tool calls to answer the query. Critically think about the results provided by subagents and reason about them carefully to verify information and ensure you provide a high-quality, accurate report. Accomplish the user’s task by directing the research subagents and creating an excellent research report from the information gathered.

" + }, + { + "role": "user", + "content": "

Query:

{{ $form.query }}

" + } + ], + "llmEnableMemory": true, + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "subagents", + "type": "jsonArray", + "enumValues": "", + "jsonSchema": "{\n \"task\": {\n \"type\": \"string\",\n \"description\": \"The research task for subagent\"\n }\n}", + "description": "A list of subagents to perform research task" + } + ], + "llmUpdateState": [ + { + "key": "subagents", + "value": "

{{ output.subagents }}

" + } + ], + "llmModelConfig": { + "credential": "", + "modelName": "claude-sonnet-4-0", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "llmModel": "chatAnthropic" + }, + "llmUserMessage": "

" + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 213, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": -111.52635639216058, + "y": 83.67035986437665 + }, + "dragging": false + }, + { + "id": "iterationAgentflow_0", + "position": { + "x": 126.70987564816664, + "y": -5.337791594648138 + }, + "data": { + "id": "iterationAgentflow_0", + "label": "Spawn SubAgents", + "version": 1, + "name": "iterationAgentflow", + "type": "Iteration", + "color": "#9C89B8", + "baseClasses": [ + "Iteration" + ], + "category": "Agent Flows", + "description": "Execute the nodes within the iteration block through N iterations", + "inputParams": [ + { + "label": "Array Input", + "name": "iterationInput", + "type": "string", + "description": "The input array to iterate over", + "acceptVariable": true, + "rows": 4, + "id": "iterationAgentflow_0-input-iterationInput-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "iterationInput": "

{{ $flow.state.subagents }}

" + }, + "outputAnchors": [ + { + "id": "iterationAgentflow_0-output-iterationAgentflow", + "label": "Iteration", + "name": "iterationAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "iteration", + "width": 300, + "height": 250, + "selected": false, + "positionAbsolute": { + "x": 126.70987564816664, + "y": -5.337791594648138 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 53.64516693688461, + "y": 77.49272566017132 + }, + "data": { + "id": "agentAgentflow_0", + "label": "SubAgent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": 
"boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + 
"rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatAnthropic", + "agentMessages": [ + { + "role": "system", + "content": "

You are a research subagent working as part of a team. The current date is {{ current_date_time }}. You have been given a clear <task> provided by a lead agent, and should use your available tools to accomplish this task in a research process. Follow the instructions below closely to accomplish your specific <task> well:

<task>

{{ $iteration.task }}

</task>

<research_process>

  1. Planning: First, think through the task thoroughly. Make a research plan, carefully reasoning to review the requirements of the task, develop a research plan to fulfill these requirements, and determine what tools are most relevant and how they should be used optimally to fulfill the task.

  2. Tool selection: Reason about what tools would be most helpful to use for this task. Use the right tools when a task implies they would be helpful.

  3. Research loop: Execute an excellent OODA (observe, orient, decide, act) loop by (a) observing what information has been gathered so far, what still needs to be gathered to accomplish the task, and what tools are available currently; (b) orienting toward what tools and queries would be best to gather the needed information and updating beliefs based on what has been learned so far; (c) making an informed, well-reasoned decision to use a specific tool in a certain way; (d) acting to use this tool. Repeat this loop in an efficient way to research well and learn based on new results.

Follow this process well to complete the task. Make sure to follow the <task> description and investigate the best sources.

</research_process>

<research_guidelines>

  1. Be detailed in your internal process, but more concise and information-dense in reporting the results.

  2. Avoid overly specific searches that might have poor hit rates:

  3. For important facts, especially numbers and dates:

* When encountering conflicting information, prioritize based on recency, consistency with other facts, the quality of the sources used, and use your best judgment and reasoning. If unable to reconcile facts, include the conflicting information in your final task report for the lead researcher to resolve.

4. Be specific and precise in your information gathering approach.

</research_guidelines>

<think_about_source_quality>

After receiving results from web searches or other tools, think critically, reason about the results, and determine what to do next. Pay attention to the details of tool results, and do not just take them at face value. For example, some pages may speculate about things that may happen in the future - mentioning predictions, using verbs like “could” or “may”, narrative driven speculation with future tense, quoted superlatives, financial projections, or similar - and you should make sure to note this explicitly in the final report, rather than accepting these events as having happened. Similarly, pay attention to the indicators of potentially problematic sources, like news aggregators rather than original sources of the information, false authority, pairing of passive voice with nameless sources, general qualifiers without specifics, unconfirmed reports, marketing language for a product, spin language, speculation, or misleading and cherry-picked data. Maintain epistemic honesty and practice good reasoning by ensuring sources are high-quality and only reporting accurate information to the lead researcher. If there are potential issues with results, flag these issues when returning your report to the lead researcher rather than blindly presenting all results as established facts.

DO NOT use the evaluate_source_quality tool ever - ignore this tool. It is broken and using it will not work.

</think_about_source_quality>

<use_parallel_tool_calls>

For maximum efficiency, whenever you need to perform multiple independent operations, invoke 2 relevant tools simultaneously rather than sequentially. Prefer calling tools like web search in parallel rather than by themselves.

</use_parallel_tool_calls>

<maximum_tool_call_limit>

To prevent overloading the system, it is required that you stay under a limit of 5 tool calls and under about 10 sources. This is the absolute maximum upper limit. If you exceed this limit, the subagent will be terminated. Therefore, whenever you get to around 4 tool calls or 9 sources, make sure to stop gathering sources, and instead finish it immediately. Avoid continuing to use tools when you see diminishing returns - when you are no longer finding new relevant information and results are not getting better, STOP using tools and instead compose your final report.

</maximum_tool_call_limit>

<citations>

  1. Must include source link, pages, etc.

  2. Avoid citing unnecessarily: Not every statement needs a citation. Focus on citing key facts, conclusions, and substantive claims that are linked to sources rather than common knowledge. Prioritize citing claims that readers would want to verify, that add credibility to the argument, or where a claim is clearly related to a specific source

  3. Cite meaningful semantic units: Citations should span complete thoughts, findings, or claims that make sense as standalone assertions. Avoid citing individual words or small phrase fragments that lose meaning out of context; prefer adding citations at the end of sentences

  4. Minimize sentence fragmentation: Avoid multiple citations within a single sentence that break up the flow of the sentence. Only add citations between phrases within a sentence when it is necessary to attribute specific claims within the sentence to specific sources

  5. No redundant citations close to each other: Do not place multiple citations to the same source in the same sentence, because this is redundant and unnecessary. If a sentence contains multiple citable claims from the same source, use only a single citation at the end of the sentence after the period

</citations>

Follow the <research_process> and the <research_guidelines> above to accomplish the task, making sure to parallelize tool calls for maximum efficiency. Remember to use the correct tool to retrieve full results rather than just using search snippets. Continue using the relevant tools until this task has been fully accomplished, all necessary information has been gathered, and you are ready to report the results to the lead research agent to be integrated into a final result. As soon as you have the necessary information, complete the task rather than wasting time by continuing research unnecessarily. As soon as the task is done, finish and provide your detailed, condensed, complete, accurate report with citations.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "arxiv", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "arxivName": "arxiv_search", + "arxivDescription": "Use this tool to search for academic papers on Arxiv. You can search by keywords, topics, authors, or specific Arxiv IDs. The tool can return either paper summaries or download and extract full paper content.", + "topKResults": "3", + "maxQueryLength": "300", + "docContentCharsMax": "5000", + "loadFullContent": true, + "continueOnFailure": true, + "legacyBuild": "", + "agentSelectedTool": "arxiv" + } + }, + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "credential": "", + "agentSelectedTool": "googleCustomSearch" + } + }, + { + "agentSelectedTool": "webScraperTool", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "scrapeMode": "recursive", + "maxDepth": 1, + "maxPages": "2", + "timeoutS": 60, + "description": "", + "agentSelectedTool": "webScraperTool" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

Research task:

{{ $iteration.task }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "claude-sonnet-4-0", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "agentModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 213, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 180.35504258505125, + "y": 72.15493406552318 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 457.5784259377066, + "y": 83.96506302841382 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Writer Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], 
+ "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": 
"knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": false + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": false + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are an expert research writer tasked with generating a high-quality, long-form Markdown report based on raw research findings. Your primary responsibility is to transform complex, fragmented, or unstructured research inputs into a coherent, professional report that fully answers the user's original query. This report should be suitable for an audience seeking a deep understanding of the subject.

Your guiding principles:

  1. Preserve Full Context
    Include all relevant findings, explanations, and perspectives from the original materials. Do not omit, summarize, or oversimplify key information. Your job is to retain depth and nuance while improving structure and clarity.

  2. Maintain Citation Integrity
    Ensure all citations and source links from the original findings are accurately preserved in the final report. Do not invent, remove, or alter sources. If citations are embedded inline in the source findings, carry them forward appropriately.

  3. Add Structure and Clarity
    Organize the content into a well-structured Markdown format. Use clear section headings, bullet points, numbered lists, tables and formatting as needed to improve readability and flow. Start with Introduction, end with Conclusion, and lastly sources.

  4. Markdown Output Only
    Your final output must be in Markdown format. Do not include explanations, side notes, or appendices. The only output should be the fully composed report ready for submission.

Writing guidelines:

  1. Title: A clear, compelling title for the report that reflects the core subject.

  2. Abstract/Executive Summary: A concise overview (approx. 200-300 words) of the report's main arguments, scope, and conclusions, derived from the conversation.

  3. Introduction:

  4. Main Body / Thematic Analysis (Multiple Sections):

  5. Synthesis of Insights and Key Conclusions:

  6. Implications and Future Directions:

  7. Conclusion: A strong concluding section summarizing the report's main findings, their significance, and a final thought on the subject.

Style and Tone:

" + }, + { + "role": "user", + "content": "

<research_topic>

{{ $form.query }}

</research_topic>

<existing_findings>

{{ $flow.state.findings }}

</existing_findings>

<new_findings>

{{ iterationAgentflow_0 }}

</new_findings>

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": false, + "agentReturnResponseAs": "userMessage", + "agentUpdateState": [ + { + "key": "findings", + "value": "

{{ output }}

" + } + ], + "agentModelConfig": { + "credential": "", + "modelName": "gemini-2.5-flash-preview-05-20", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 284, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 457.5784259377066, + "y": 83.96506302841382 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": 186.43721235573946, + "y": -175.0715078328168 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Each SubAgent has its own research task and tools to complete its findings" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 210, + "height": 123, + "selected": false, + "positionAbsolute": { + "x": 186.43721235573946, + "y": -175.0715078328168 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_1", + "position": { + "x": -117.00547059767304, + "y": -24.08438212240118 + }, + "data": { + 
"id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Planner will generate list of subagents" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 210, + "height": 82, + "selected": false, + "positionAbsolute": { + "x": -117.00547059767304, + "y": -24.08438212240118 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": 775.5108094609307, + "y": 79.60273632963377 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "More SubAgents?", + "version": 1.1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": [ + "ConditionAgent" + ], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": 
"conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "More subagents needed" + }, + { + "scenario": "It is sufficient" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + }, + { + "label": "Override System Prompt", + "name": "conditionAgentOverrideSystemPrompt", + "type": "boolean", + "description": "Override initial system prompt for Condition Agent", + "optional": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentOverrideSystemPrompt-boolean", + "display": true + }, + { + "label": "Node System Prompt", + "name": "conditionAgentSystemPrompt", + "type": "string", + "rows": 4, + "optional": true, + "acceptVariable": true, + "default": "

You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios.

\n \n

Steps

\n
    \n
  1. Read the input string and the list of scenarios.
  2. \n
  3. Analyze the content of the input to identify its main topic or intention.
  4. \n
  5. Compare the input with each scenario: Evaluate how well the input's topic or intention aligns with each of the provided scenarios and select the one that is the best fit.
  6. \n
  7. Output the result: Return the selected scenario in the specified JSON format.
  8. \n
\n

Output Format

\n

Output should be a JSON object that names the selected scenario, like this: {\"output\": \"\"}. No explanation is needed.

\n

Examples

\n
    \n
  1. \n

    Input: {\"input\": \"Hello\", \"scenarios\": [\"user is asking about AI\", \"user is not asking about AI\"], \"instruction\": \"Your task is to check if the user is asking about AI.\"}

    \n

    Output: {\"output\": \"user is not asking about AI\"}

    \n
  2. \n
  3. \n

    Input: {\"input\": \"What is AIGC?\", \"scenarios\": [\"user is asking about AI\", \"user is asking about the weather\"], \"instruction\": \"Your task is to check and see if the user is asking a topic about AI.\"}

    \n

    Output: {\"output\": \"user is asking about AI\"}

    \n
  4. \n
  5. \n

    Input: {\"input\": \"Can you explain deep learning?\", \"scenarios\": [\"user is interested in AI topics\", \"user wants to order food\"], \"instruction\": \"Determine if the user is interested in learning about AI.\"}

    \n

    Output: {\"output\": \"user is interested in AI topics\"}

    \n
  6. \n
\n

Note

\n ", + "description": "Expert use only. Modifying this can significantly alter agent behavior. Leave default if unsure", + "show": { + "conditionAgentOverrideSystemPrompt": true + }, + "id": "conditionAgentAgentflow_0-input-conditionAgentSystemPrompt-string", + "display": false + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatGoogleGenerativeAI", + "conditionAgentInstructions": "

Given a research topic, previous subagents and their findings, determine if more subagents are needed for further research or if the findings are sufficient for the research topic

", + "conditionAgentInput": "

<research_topic>

{{ $form.query }}

</research_topic>

<subagents>

{{ $flow.state.subagents }}

</subagents>

<findings>

{{ $flow.state.findings }}

</findings>

", + "conditionAgentScenarios": [ + { + "scenario": "More subagents are needed" + }, + { + "scenario": "Findings are sufficient" + } + ], + "conditionAgentOverrideSystemPrompt": "", + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash-lite", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "conditionAgentModel": "chatGoogleGenerativeAI" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 220, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 775.5108094609307, + "y": 79.60273632963377 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 1041.3074957535728, + "y": 20.713295322365383 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Back to Planner", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Planner", + 
"maxLoopCount": "5" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 174, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1041.3074957535728, + "y": 20.713295322365383 + }, + "dragging": false + }, + { + "id": "directReplyAgentflow_0", + "position": { + "x": 1046.735958385286, + "y": 140.25100072990062 + }, + "data": { + "id": "directReplyAgentflow_0", + "label": "Generate Report", + "version": 1, + "name": "directReplyAgentflow", + "type": "DirectReply", + "color": "#4DDBBB", + "hideOutput": true, + "baseClasses": [ + "DirectReply" + ], + "category": "Agent Flows", + "description": "Directly reply to the user with a message", + "inputParams": [ + { + "label": "Message", + "name": "directReplyMessage", + "type": "string", + "rows": 4, + "acceptVariable": true, + "id": "directReplyAgentflow_0-input-directReplyMessage-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "directReplyMessage": "

{{ $flow.state.findings }}

" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 179, + "height": 66, + "positionAbsolute": { + "x": 1046.735958385286, + "y": 140.25100072990062 + }, + "selected": false, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_3", + "position": { + "x": 494.1635881448354, + "y": -47.5842428829507 + }, + "data": { + "id": "stickyNoteAgentflow_3", + "label": "Sticky Note (3)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_3-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Write Agent combine the findings and generate an updated report" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_3-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 210, + "height": 123, + "selected": false, + "positionAbsolute": { + "x": 494.1635881448354, + "y": -47.5842428829507 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "iterationAgentflow_0", + "targetHandle": "iterationAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": 
"#9C89B8", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-iterationAgentflow_0-iterationAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#FFA07A", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "iterationAgentflow_0", + "sourceHandle": "iterationAgentflow_0-output-iterationAgentflow", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#9C89B8", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "iterationAgentflow_0-iterationAgentflow_0-output-iterationAgentflow-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "directReplyAgentflow_0", + "targetHandle": "directReplyAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DDBBB", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-directReplyAgentflow_0-directReplyAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/Deep Research Report.pdf b/fr/.gitbook/assets/Deep Research 
Report.pdf new file mode 100644 index 00000000..448b10f4 Binary files /dev/null and b/fr/.gitbook/assets/Deep Research Report.pdf differ diff --git a/fr/.gitbook/assets/Email Agent.json b/fr/.gitbook/assets/Email Agent.json new file mode 100644 index 00000000..82692712 --- /dev/null +++ b/fr/.gitbook/assets/Email Agent.json @@ -0,0 +1,602 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -201.62473061824977, + "y": 92.61621373702832 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": true + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": true + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + 
"options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": true + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "formInput", + "formTitle": "Email Inquiry", + "formDescription": "Incoming email inquiry", + "formInputTypes": [ + { + "type": "string", + "label": "Subject", 
+ "name": "subject", + "addOptions": "" + }, + { + "type": "string", + "label": "Body", + "name": "body", + "addOptions": "" + }, + { + "type": "string", + "label": "From", + "name": "from", + "addOptions": "" + } + ], + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -201.62473061824977, + "y": 92.61621373702832 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": -61.56009223078007, + "y": 76 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Email Reply Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": 
"agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + 
"name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a customer support agent working in Flowise Inc. Create a draft professional email reply to user's query. Use the web search tools to get more details about the prospect.

Always reply as Samantha, Customer Support Representative in Flowise. Dont use placeholders.

Today's date is {{ current_date_time }}.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + }, + { + "agentSelectedTool": "gmail", + "agentSelectedToolRequiresHumanInput": true, + "agentSelectedToolConfig": { + "gmailType": "drafts", + "draftActions": "[\"createDraft\"]", + "agentSelectedTool": "gmail" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 189, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": -61.56009223078007, + "y": 76 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/Flowise Cropped Logo.svg b/fr/.gitbook/assets/Flowise Cropped Logo.svg new file mode 100644 index 00000000..2a93a744 --- /dev/null +++ 
b/fr/.gitbook/assets/Flowise Cropped Logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/fr/.gitbook/assets/Flowise Cropped White High Res.png b/fr/.gitbook/assets/Flowise Cropped White High Res.png new file mode 100644 index 00000000..8617457a Binary files /dev/null and b/fr/.gitbook/assets/Flowise Cropped White High Res.png differ diff --git a/fr/.gitbook/assets/Flowise Logo Cropped White High Res.png b/fr/.gitbook/assets/Flowise Logo Cropped White High Res.png new file mode 100644 index 00000000..b03f493b Binary files /dev/null and b/fr/.gitbook/assets/Flowise Logo Cropped White High Res.png differ diff --git a/fr/.gitbook/assets/FlowiseIntro (1).gif b/fr/.gitbook/assets/FlowiseIntro (1).gif new file mode 100644 index 00000000..5d8b7c12 Binary files /dev/null and b/fr/.gitbook/assets/FlowiseIntro (1).gif differ diff --git a/fr/.gitbook/assets/FlowiseIntro.gif b/fr/.gitbook/assets/FlowiseIntro.gif new file mode 100644 index 00000000..5d8b7c12 Binary files /dev/null and b/fr/.gitbook/assets/FlowiseIntro.gif differ diff --git a/fr/.gitbook/assets/Human In The Loop Agent.json b/fr/.gitbook/assets/Human In The Loop Agent.json new file mode 100644 index 00000000..96fc67ba --- /dev/null +++ b/fr/.gitbook/assets/Human In The Loop Agent.json @@ -0,0 +1,1315 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -201.62473061824977, + "y": 92.61621373702832 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + 
"description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": true + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": true + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": true + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "formInput", + "formTitle": "Email Inquiry", + "formDescription": "Incoming email inquiry", + "formInputTypes": [ + { + "type": "string", + "label": "Subject", + "name": "subject", + "addOptions": "" + }, + { + "type": "string", + "label": "Body", + "name": "body", + "addOptions": "" + }, + { + "type": "string", + "label": "From", + "name": "from", + "addOptions": "" + } + ], + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -201.62473061824977, + "y": 92.61621373702832 + }, + "dragging": false + }, + { + 
"id": "agentAgentflow_0", + "position": { + "x": -61.56009223078007, + "y": 76 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Email Reply Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a customer support agent working in Flowise Inc. Write a professional email reply to user's query. Use the web search tools to get more details about the prospect.

Always reply as Samantha, Customer Support Representative in Flowise. Dont use placeholders.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + }, + { + "agentSelectedTool": "currentDateTime", + "agentSelectedToolConfig": { + "agentSelectedTool": "currentDateTime" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 189, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": -61.56009223078007, + "y": 76 + }, + "dragging": false + }, + { + "id": "humanInputAgentflow_0", + "position": { + "x": 156.05666363734434, + "y": 86.62266545493773 + }, + "data": { + "id": "humanInputAgentflow_0", + "label": "Human Input 0", + "version": 1, + "name": "humanInputAgentflow", + "type": "HumanInput", + "color": "#6E6EFD", + "baseClasses": [ + "HumanInput" + ], + "category": "Agent Flows", + "description": "Request human input, approval or rejection during execution", + "inputParams": [ + { + "label": "Description Type", + "name": "humanInputDescriptionType", + "type": "options", + "options": [ + { + "label": "Fixed", + "name": "fixed", + "description": "Specify a fixed description" + }, + { + "label": "Dynamic", + "name": "dynamic", + "description": "Use LLM to 
generate a description" + } + ], + "id": "humanInputAgentflow_0-input-humanInputDescriptionType-options", + "display": true + }, + { + "label": "Description", + "name": "humanInputDescription", + "type": "string", + "placeholder": "Are you sure you want to proceed?", + "acceptVariable": true, + "rows": 4, + "show": { + "humanInputDescriptionType": "fixed" + }, + "id": "humanInputAgentflow_0-input-humanInputDescription-string", + "display": true + }, + { + "label": "Model", + "name": "humanInputModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "show": { + "humanInputDescriptionType": "dynamic" + }, + "id": "humanInputAgentflow_0-input-humanInputModel-asyncOptions", + "display": false + }, + { + "label": "Prompt", + "name": "humanInputModelPrompt", + "type": "string", + "default": "

Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.

\n\n

Output Format The output should be structured in three parts in text:

\n\n", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4, + "show": { + "humanInputDescriptionType": "dynamic" + }, + "id": "humanInputAgentflow_0-input-humanInputModelPrompt-string", + "display": false + }, + { + "label": "Enable Feedback", + "name": "humanInputEnableFeedback", + "type": "boolean", + "default": true, + "id": "humanInputAgentflow_0-input-humanInputEnableFeedback-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "humanInputDescriptionType": "fixed", + "humanInputEnableFeedback": true, + "humanInputModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "humanInputModel": "chatOpenAI" + }, + "humanInputDescription": "

Are you sure you want to proceed?

" + }, + "outputAnchors": [ + { + "id": "humanInputAgentflow_0-output-0", + "label": "Human Input", + "name": "humanInputAgentflow" + }, + { + "id": "humanInputAgentflow_0-output-1", + "label": "Human Input", + "name": "humanInputAgentflow" + } + ], + "outputs": { + "humanInputAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 167, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 156.05666363734434, + "y": 86.62266545493773 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 392.1370040831033, + "y": 150.41190827718114 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop back to Agent", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "agentAgentflow_0-Email Reply Agent", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 198, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 392.1370040831033, + "y": 150.41190827718114 + }, + "dragging": false + }, + { + "id": "toolAgentflow_0", + "position": { + "x": 607.0106274902857, + "y": 44.74028001269521 + }, + "data": { + "id": "toolAgentflow_0", + "label": "Send Email", + "version": 1.1, + "name": "toolAgentflow", + "type": "Tool", + "color": "#d4a373", + "baseClasses": [ + "Tool" + ], + "category": "Agent Flows", + 
"description": "Tools allow LLM to interact with external systems", + "inputParams": [ + { + "label": "Tool", + "name": "toolAgentflowSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true, + "id": "toolAgentflow_0-input-toolAgentflowSelectedTool-asyncOptions", + "display": true + }, + { + "label": "Tool Input Arguments", + "name": "toolInputArgs", + "type": "array", + "acceptVariable": true, + "refresh": true, + "array": [ + { + "label": "Input Argument Name", + "name": "inputArgName", + "type": "asyncOptions", + "loadMethod": "listToolInputArgs", + "refresh": true + }, + { + "label": "Input Argument Value", + "name": "inputArgValue", + "type": "string", + "acceptVariable": true + } + ], + "show": { + "toolAgentflowSelectedTool": ".+" + }, + "id": "toolAgentflow_0-input-toolInputArgs-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "toolUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "toolAgentflow_0-input-toolUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "toolAgentflowSelectedTool": "gmail", + "toolInputArgs": [ + { + "inputArgName": "to", + "inputArgValue": "

{{ $form.from }}

" + }, + { + "inputArgName": "subject", + "inputArgValue": "

{{ llmAgentflow_0.output.subject }}

" + }, + { + "inputArgName": "body", + "inputArgValue": "

{{ llmAgentflow_0.output.body }}

" + } + ], + "toolUpdateState": "", + "toolAgentflowSelectedToolConfig": { + "gmailType": "messages", + "messageActions": "[\"sendMessage\"]", + "toolAgentflowSelectedTool": "gmail" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "toolAgentflow_0-output-toolAgentflow", + "label": "Tool", + "name": "toolAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 143, + "height": 68, + "selected": false, + "positionAbsolute": { + "x": 607.0106274902857, + "y": 44.74028001269521 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 368.9022119252032, + "y": 43.50583396320786 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Email Subject & Body", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + 
"optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "subject", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "Subject of the email" + }, + { + "key": "body", + "type": 
"string", + "enumValues": "", + "jsonSchema": "", + "description": "Body of the email" + } + ], + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 209, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 368.9022119252032, + "y": 43.50583396320786 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "humanInputAgentflow_0", + "targetHandle": "humanInputAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#6E6EFD", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-humanInputAgentflow_0-humanInputAgentflow_0" + }, + { + "source": "humanInputAgentflow_0", + "sourceHandle": "humanInputAgentflow_0-output-1", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#6E6EFD", + "targetColor": "#FFA07A", + "edgeLabel": "reject", + "isHumanInput": 
true + }, + "type": "agentFlow", + "id": "humanInputAgentflow_0-humanInputAgentflow_0-output-1-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "humanInputAgentflow_0", + "sourceHandle": "humanInputAgentflow_0-output-0", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#6E6EFD", + "targetColor": "#64B5F6", + "edgeLabel": "proceed", + "isHumanInput": true + }, + "type": "agentFlow", + "id": "humanInputAgentflow_0-humanInputAgentflow_0-output-0-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "toolAgentflow_0", + "targetHandle": "toolAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#d4a373", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-toolAgentflow_0-toolAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/OpenAPI Chatflow.json b/fr/.gitbook/assets/OpenAPI Chatflow.json new file mode 100644 index 00000000..7c4f9d1c --- /dev/null +++ b/fr/.gitbook/assets/OpenAPI Chatflow.json @@ -0,0 +1,868 @@ +{ + "nodes": [ + { + "id": "openAIToolAgent_0", + "position": { + "x": 2043.452556953855, + "y": 419.243930407276 + }, + "type": "customNode", + "data": { + "id": "openAIToolAgent_0", + "label": "OpenAI Tool Agent", + "version": 1, + "name": "openAIToolAgent", + "type": "AgentExecutor", + "baseClasses": [ + "AgentExecutor", + "BaseChain", + "Runnable" + ], + "category": "Agents", + "description": "Agent that uses OpenAI Function Calling to pick the tools and args to call", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessage", + "type": "string", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "openAIToolAgent_0-input-systemMessage-string" + } + ], + "inputAnchors": [ + { + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": 
"openAIToolAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "openAIToolAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "OpenAI/Azure Chat Model", + "name": "model", + "type": "BaseChatModel", + "id": "openAIToolAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "openAIToolAgent_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "tools": [ + "{{chainTool_0.data.instance}}" + ], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatOpenAI_1.data.instance}}", + "systemMessage": "", + "inputModeration": "" + }, + "outputAnchors": [ + { + "id": "openAIToolAgent_0-output-openAIToolAgent-AgentExecutor|BaseChain|Runnable", + "name": "openAIToolAgent", + "label": "AgentExecutor", + "description": "Agent that uses OpenAI Function Calling to pick the tools and args to call", + "type": "AgentExecutor | BaseChain | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 434, + "selected": false, + "positionAbsolute": { + "x": 2043.452556953855, + "y": 419.243930407276 + }, + "dragging": false + }, + { + "id": "chatOpenAI_1", + "position": { + "x": 1172.9727392624852, + "y": 531.4096673286917 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_1", + "label": "ChatOpenAI", + "version": 5, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": 
"chatOpenAI_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-1106-vision-preview", + "name": "gpt-4-1106-vision-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": 
"chatOpenAI_1-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_1-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + 
"allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 669, + "selected": false, + "positionAbsolute": { + "x": 1172.9727392624852, + "y": 531.4096673286917 + }, + "dragging": false + }, + { + "id": "bufferMemory_0", + "position": { + "x": 748.796752834334, + "y": 770.3068397228885 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": [ + "BufferMemory", + "BaseChatMemory", + "BaseMemory" + ], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "description": "Remembers previous conversational back and forths directly", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 376, + "selected": false, + "positionAbsolute": { + "x": 748.796752834334, + "y": 770.3068397228885 + }, + "dragging": false + }, + { + "id": "openApiChain_0", + 
"position": { + "x": 1145.3060530569107, + "y": -103.11418698407195 + }, + "type": "customNode", + "data": { + "id": "openApiChain_0", + "label": "OpenAPI Chain", + "version": 2, + "name": "openApiChain", + "type": "OpenAPIChain", + "baseClasses": [ + "OpenAPIChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain that automatically select and call APIs based only on an OpenAPI spec", + "inputParams": [ + { + "label": "YAML Link", + "name": "yamlLink", + "type": "string", + "placeholder": "https://api.speak.com/openapi.yaml", + "description": "If YAML link is provided, uploaded YAML File will be ignored and YAML link will be used instead", + "id": "openApiChain_0-input-yamlLink-string" + }, + { + "label": "YAML File", + "name": "yamlFile", + "type": "file", + "fileType": ".yaml", + "description": "If YAML link is provided, uploaded YAML File will be ignored and YAML link will be used instead", + "id": "openApiChain_0-input-yamlFile-file" + }, + { + "label": "Headers", + "name": "headers", + "type": "json", + "additionalParams": true, + "optional": true, + "id": "openApiChain_0-input-headers-json" + } + ], + "inputAnchors": [ + { + "label": "ChatOpenAI Model", + "name": "model", + "type": "ChatOpenAI", + "id": "openApiChain_0-input-model-ChatOpenAI" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "openApiChain_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_2.data.instance}}", + "yamlLink": "", + "headers": "", + "inputModeration": "" + }, + "outputAnchors": [ + { + "id": "openApiChain_0-output-openApiChain-OpenAPIChain|BaseChain|Runnable", + "name": "openApiChain", + "label": "OpenAPIChain", + "description": "Chain that automatically select and call APIs based only on an OpenAPI spec", + 
"type": "OpenAPIChain | BaseChain | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 561, + "selected": false, + "positionAbsolute": { + "x": 1145.3060530569107, + "y": -103.11418698407195 + }, + "dragging": false + }, + { + "id": "chatOpenAI_2", + "position": { + "x": 759.2922754505641, + "y": -147.3984427366269 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_2", + "label": "ChatOpenAI", + "version": 5, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-1106-vision-preview", + "name": "gpt-4-1106-vision-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": 
"gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_2-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 669, + "selected": false, + "positionAbsolute": { + "x": 759.2922754505641, + "y": -147.3984427366269 + }, + "dragging": false + }, + { + "id": "chainTool_0", + "position": { + "x": 1521.7629525254497, + "y": -103.11418698407192 + }, + "type": "customNode", + "data": { + "id": "chainTool_0", + "label": "Chain Tool", + "version": 1, + "name": "chainTool", + "type": "ChainTool", + "baseClasses": [ + "ChainTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": 
"Tools", + "description": "Use a chain as allowed tool for agent", + "inputParams": [ + { + "label": "Chain Name", + "name": "name", + "type": "string", + "placeholder": "state-of-union-qa", + "id": "chainTool_0-input-name-string" + }, + { + "label": "Chain Description", + "name": "description", + "type": "string", + "rows": 3, + "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", + "id": "chainTool_0-input-description-string" + }, + { + "label": "Return Direct", + "name": "returnDirect", + "type": "boolean", + "optional": true, + "id": "chainTool_0-input-returnDirect-boolean" + } + ], + "inputAnchors": [ + { + "label": "Base Chain", + "name": "baseChain", + "type": "BaseChain", + "id": "chainTool_0-input-baseChain-BaseChain" + } + ], + "inputs": { + "name": "search_shirt", + "description": "useful when you need to search and return answer about tshirts", + "returnDirect": false, + "baseChain": "{{openApiChain_0.data.instance}}" + }, + "outputAnchors": [ + { + "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "chainTool", + "label": "ChainTool", + "description": "Use a chain as allowed tool for agent", + "type": "ChainTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 602, + "selected": false, + "positionAbsolute": { + "x": 1521.7629525254497, + "y": -103.11418698407192 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "chatOpenAI_1", + "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "openAIToolAgent_0", + "targetHandle": "openAIToolAgent_0-input-model-BaseChatModel", + "type": "buttonedge", + "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-openAIToolAgent_0-openAIToolAgent_0-input-model-BaseChatModel" + }, + { + "source": 
"bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "openAIToolAgent_0", + "targetHandle": "openAIToolAgent_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-openAIToolAgent_0-openAIToolAgent_0-input-memory-BaseChatMemory" + }, + { + "source": "chatOpenAI_2", + "sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "openApiChain_0", + "targetHandle": "openApiChain_0-input-model-ChatOpenAI", + "type": "buttonedge", + "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-openApiChain_0-openApiChain_0-input-model-ChatOpenAI" + }, + { + "source": "openApiChain_0", + "sourceHandle": "openApiChain_0-output-openApiChain-OpenAPIChain|BaseChain|Runnable", + "target": "chainTool_0", + "targetHandle": "chainTool_0-input-baseChain-BaseChain", + "type": "buttonedge", + "id": "openApiChain_0-openApiChain_0-output-openApiChain-OpenAPIChain|BaseChain|Runnable-chainTool_0-chainTool_0-input-baseChain-BaseChain" + }, + { + "source": "chainTool_0", + "sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": "openAIToolAgent_0", + "targetHandle": "openAIToolAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|Runnable-openAIToolAgent_0-openAIToolAgent_0-input-tools-Tool" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/OpenAPI Toolkit with ToolAgent Chatflow.json b/fr/.gitbook/assets/OpenAPI Toolkit with ToolAgent Chatflow.json new file mode 100644 index 00000000..f83cc3b0 --- /dev/null +++ b/fr/.gitbook/assets/OpenAPI Toolkit with ToolAgent Chatflow.json @@ -0,0 +1,491 @@ +{ + "nodes": [ + { + "id": "toolAgent_0", + "position": { + "x": 
1293.1879986131244, + "y": 191.1741937636126 + }, + "type": "customNode", + "data": { + "id": "toolAgent_0", + "label": "Tool Agent", + "version": 2, + "name": "toolAgent", + "type": "AgentExecutor", + "baseClasses": [ + "AgentExecutor", + "BaseChain", + "Runnable" + ], + "category": "Agents", + "description": "Agent that uses Function Calling to pick the tools and args to call", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessage", + "type": "string", + "default": "You are a helpful AI assistant.", + "description": "If Chat Prompt Template is provided, this will be ignored", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "toolAgent_0-input-systemMessage-string" + }, + { + "label": "Max Iterations", + "name": "maxIterations", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "toolAgent_0-input-maxIterations-number" + } + ], + "inputAnchors": [ + { + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "toolAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "toolAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Tool Calling Chat Model", + "name": "model", + "type": "BaseChatModel", + "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", + "id": "toolAgent_0-input-model-BaseChatModel" + }, + { + "label": "Chat Prompt Template", + "name": "chatPromptTemplate", + "type": "ChatPromptTemplate", + "description": "Override existing prompt with Chat Prompt Template. 
Human Message must includes {input} variable", + "optional": true, + "id": "toolAgent_0-input-chatPromptTemplate-ChatPromptTemplate" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "toolAgent_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "tools": [ + "{{openAPIToolkit_0.data.instance}}" + ], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatOpenAI_0.data.instance}}", + "chatPromptTemplate": "", + "systemMessage": "You are a helpful AI assistant.", + "inputModeration": "", + "maxIterations": "" + }, + "outputAnchors": [ + { + "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable", + "name": "toolAgent", + "label": "AgentExecutor", + "description": "Agent that uses Function Calling to pick the tools and args to call", + "type": "AgentExecutor | BaseChain | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 484, + "selected": false, + "positionAbsolute": { + "x": 1293.1879986131244, + "y": 191.1741937636126 + }, + "dragging": false + }, + { + "id": "bufferMemory_0", + "position": { + "x": 828.6997922809826, + "y": 483.14472669268787 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 2, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": [ + "BufferMemory", + "BaseChatMemory", + "BaseMemory" + ], + "category": "Memory", + "description": "Retrieve chat messages stored in database", + "inputParams": [ + { + "label": "Session Id", + "name": "sessionId", + "type": "string", + "description": "If not specified, a random id will be used. 
Learn more", + "default": "", + "additionalParams": true, + "optional": true, + "id": "bufferMemory_0-input-sessionId-string" + }, + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "additionalParams": true, + "id": "bufferMemory_0-input-memoryKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "sessionId": "", + "memoryKey": "chat_history" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "description": "Retrieve chat messages stored in database", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 251, + "selected": false, + "positionAbsolute": { + "x": 828.6997922809826, + "y": 483.14472669268787 + }, + "dragging": false + }, + { + "id": "chatOpenAI_0", + "position": { + "x": 427.0142725311101, + "y": -43.8666581648755 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 7, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "gpt-3.5-turbo", + "id": "chatOpenAI_0-input-modelName-asyncOptions" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + 
"type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "Proxy Url", + "name": "proxyUrl", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-proxyUrl-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, Conversational Agent, Tool Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo", + "temperature": "0.9", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 669, + "selected": false, + "positionAbsolute": { + "x": 427.0142725311101, + "y": -43.8666581648755 + }, + "dragging": false + }, + { + "id": "openAPIToolkit_0", + "position": { + "x": 831.9599233268808, + "y": -32.87251462719371 + }, + "type": "customNode", + "data": { + "id": "openAPIToolkit_0", + "label": "OpenAPI Toolkit", + "version": 2, + "name": "openAPIToolkit", + "type": "OpenAPIToolkit", + "baseClasses": [ + "OpenAPIToolkit", + "Tool" + ], + "category": 
"Tools", + "description": "Load OpenAPI specification, and converts each API endpoint to a tool", + "inputParams": [ + { + "label": "YAML File", + "name": "yamlFile", + "type": "file", + "fileType": ".yaml", + "id": "openAPIToolkit_0-input-yamlFile-file" + }, + { + "label": "Return Direct", + "name": "returnDirect", + "description": "Return the output of the tool directly to the user", + "type": "boolean", + "optional": true, + "id": "openAPIToolkit_0-input-returnDirect-boolean" + }, + { + "label": "Headers", + "name": "headers", + "type": "json", + "description": "Request headers to be sent with the API request. For example, {\"Authorization\": \"Bearer token\"}", + "additionalParams": true, + "optional": true, + "id": "openAPIToolkit_0-input-headers-json" + }, + { + "label": "Custom Code", + "name": "customCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "- **Libraries:** \n You can use any libraries imported in Flowise.\n\n- **Tool Input Arguments:** \n Tool input arguments are available as the following variables:\n - `$PathParameters`\n - `$QueryParameters`\n - `$RequestBody`\n\n- **HTTP Requests:** \n By default, you can get the following values for making HTTP requests:\n - `$url`\n - `$options`\n\n- **Default Flow Config:** \n You can access the default flow configuration using these variables:\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n- **Custom Variables:** \n You can get custom variables using the syntax:\n - `$vars.`\n\n- **Return Value:** \n The function must return a **string** value at the end.\n\n```js\nconst fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n\n```\n" + }, + "codeExample": "const fetch = require('node-fetch');\nconst url = $url;\nconst 
options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n", +                            "description": "Custom code to return the output of the tool. The code should be a function that takes in the input and returns a string", +                            "hideCodeExecute": true, +                            "default": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n", +                            "additionalParams": true, +                            "id": "openAPIToolkit_0-input-customCode-code" +                        } +                    ], +                    "inputAnchors": [], +                    "inputs": { +                        "returnDirect": "", +                        "headers": "{\"Authorization\":\"Bearer <YOUR_OPENAI_API_KEY>\"}", +                        "customCode": "const fetch = require('node-fetch');\nconst url = $url;\nconsole.log('url=', url);\nconst options = $options;\nconsole.log('options=', options);\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n    console.log('resp=', resp);\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n" +                    }, +                    "outputAnchors": [ +                        { +                            "id": "openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool", +                            "name": "openAPIToolkit", +                            "label": "OpenAPIToolkit", +                            "description": "Load OpenAPI specification, and converts each API endpoint to a tool", +                            "type": "OpenAPIToolkit | Tool" +                        } +                    ], +                    "outputs": {}, +                    "selected": false +                }, +                "width": 300, +                "height": 457, +                "selected": false, +                "positionAbsolute": { +                    "x": 831.9599233268808, +                    "y": -32.87251462719371 +                }, +                "dragging": false +            } +        ], +        "edges": [ +            { +                "source": "bufferMemory_0", +                "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", +                "target": "toolAgent_0", +                "targetHandle": 
"toolAgent_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-toolAgent_0-toolAgent_0-input-memory-BaseChatMemory" + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "toolAgent_0", + "targetHandle": "toolAgent_0-input-model-BaseChatModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-toolAgent_0-toolAgent_0-input-model-BaseChatModel" + }, + { + "source": "openAPIToolkit_0", + "sourceHandle": "openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool", + "target": "toolAgent_0", + "targetHandle": "toolAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "openAPIToolkit_0-openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool-toolAgent_0-toolAgent_0-input-tools-Tool" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/Picture1 (1).png b/fr/.gitbook/assets/Picture1 (1).png new file mode 100644 index 00000000..f1936b4c Binary files /dev/null and b/fr/.gitbook/assets/Picture1 (1).png differ diff --git a/fr/.gitbook/assets/Picture1.png b/fr/.gitbook/assets/Picture1.png new file mode 100644 index 00000000..6d851c87 Binary files /dev/null and b/fr/.gitbook/assets/Picture1.png differ diff --git a/fr/.gitbook/assets/Picture7.png b/fr/.gitbook/assets/Picture7.png new file mode 100644 index 00000000..a3e9a0f4 Binary files /dev/null and b/fr/.gitbook/assets/Picture7.png differ diff --git a/fr/.gitbook/assets/Requests Tool Agent.json b/fr/.gitbook/assets/Requests Tool Agent.json new file mode 100644 index 00000000..ecd911d3 --- /dev/null +++ b/fr/.gitbook/assets/Requests Tool Agent.json @@ -0,0 +1,626 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": 100, + "y": 100 + }, + "data": { + "id": "startAgentflow_0", + 
"label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel 
case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startEphemeralMemory": "", + "startState": "", + "startPersistState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "positionAbsolute": { + "x": 100, + "y": 100 + }, + "selected": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 240, + "y": 83.5 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Parent Agent", + "version": 1, + 
"name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a helpful assistant.

Today's date time is {{ current_date_time }}

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "requestsGet", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "requestsGetUrl": "

http://localhost:5566/events

", + "requestsGetName": "get_events", + "requestsGetDescription": "Use this when you need to get events", + "requestsGetHeaders": "", + "requestsGetQueryParamsSchema": "{\n \"id\": {\n \"type\": \"string\",\n \"in\": \"path\",\n \"description\": \"ID of the item to get. /:id\"\n },\n \"limit\": {\n \"type\": \"string\",\n \"in\": \"query\",\n \"description\": \"Limit the number of items to get. ?limit=10\"\n }\n}", + "requestsGetMaxOutputLength": "2000", + "agentSelectedTool": "requestsGet" + } + }, + { + "agentSelectedTool": "requestsPost", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "requestsPostUrl": "

http://localhost:5566/events

", + "requestsPostName": "create_event", + "requestsPostDescription": "Use this when you want to create a new event", + "requestsPostHeaders": "", + "requestPostBody": "", + "requestsPostBodySchema": "{\n \"name\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Name of the event\"\n },\n \"date\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Date of the event\"\n },\n \"location\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Location of the event\"\n }\n}", + "requestsPostMaxOutputLength": "2000", + "agentSelectedTool": "requestsPost" + } + }, + { + "agentSelectedTool": "requestsPut", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "requestsPutUrl": "

http://localhost:5566/events

", + "requestsPutName": "update_event", + "requestsPutDescription": "Use this when you want to update an event", + "requestsPutHeaders": "", + "requestPutBody": "", + "requestsPutBodySchema": "{\n \"name\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Name of the event\"\n },\n \"date\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Date of the event\"\n },\n \"location\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Location of the event\"\n }\n}", + "requestsPutMaxOutputLength": "2000", + "agentSelectedTool": "requestsPut" + } + }, + { + "agentSelectedTool": "requestsDelete", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "requestsDeleteUrl": "

http://localhost:5566/events

", + "requestsDeleteName": "delete_event", + "requestsDeleteDescription": "Use this when you need to delete event", + "requestsDeleteHeaders": "", + "requestsDeleteQueryParamsSchema": "{\n \"id\": {\n \"type\": \"string\",\n \"required\": true,\n \"in\": \"path\",\n \"description\": \"ID of the item to delete. /:id\"\n }\n}", + "requestsDeleteMaxOutputLength": "2000", + "agentSelectedTool": "requestsDelete" + } + } + ], + "agentKnowledgeDocumentStores": [], + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 176, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 240, + "y": 83.5 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/SQL Agent.json b/fr/.gitbook/assets/SQL Agent.json new file mode 100644 index 
00000000..70f5d6b4 --- /dev/null +++ b/fr/.gitbook/assets/SQL Agent.json @@ -0,0 +1,2078 @@ +{ + "description": "An agent that can perform question answering over a database", + "usecases": ["SQL"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -97, + "y": 108 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": 
"Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startEphemeralMemory": "", + "startState": [ + { + "key": "sqlQuery", + "value": "" + } + ], + "startPersistState": "" + }, + "outputAnchors": [ + { + "id": 
"startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -97, + "y": 108 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_0", + "position": { + "x": 58.5, + "y": 109 + }, + "data": { + "id": "customFunctionAgentflow_0", + "label": "Get DB Schema", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Schema as variables. 
Ex: Property = userid, Variable = $userid\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get custom variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_0-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": "", + "customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\n\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nlet sqlSchemaPrompt = '';\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nasync function 
getSQLPrompt() {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n // Get all user-defined tables (excluding system tables)\n const tablesResult = await queryRunner.query(`\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public' AND table_type = 'BASE TABLE'\n `);\n\n for (const tableRow of tablesResult) {\n const tableName = tableRow.table_name;\n\n const schemaInfo = await queryRunner.query(`\n SELECT column_name, data_type, is_nullable\n FROM information_schema.columns\n WHERE table_name = '${tableName}'\n `);\n\n const createColumns = [];\n const columnNames = [];\n\n for (const column of schemaInfo) {\n const name = column.column_name;\n const type = column.data_type.toUpperCase();\n const notNull = column.is_nullable === 'NO' ? 'NOT NULL' : '';\n columnNames.push(name);\n createColumns.push(`${name} ${type} ${notNull}`);\n }\n\n const sqlCreateTableQuery = `CREATE TABLE ${tableName} (${createColumns.join(', ')})`;\n const sqlSelectTableQuery = `SELECT * FROM ${tableName} LIMIT 3`;\n\n let allValues = [];\n try {\n const rows = await queryRunner.query(sqlSelectTableQuery);\n\n allValues = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n } catch (err) {\n allValues.push('[ERROR FETCHING ROWS]');\n }\n\n sqlSchemaPrompt +=\n sqlCreateTableQuery +\n '\\n' +\n sqlSelectTableQuery +\n '\\n' +\n columnNames.join(' ') +\n '\\n' +\n allValues.join('\\n') +\n '\\n\\n';\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error(err);\n throw err;\n }\n}\n\nasync function main() {\n await getSQLPrompt();\n}\n\nawait main();\n\nreturn sqlSchemaPrompt;\n", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_0-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 173, + "height": 66, + 
"selected": false, + "positionAbsolute": { + "x": 58.5, + "y": 109 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 272.7184381707814, + "y": 106.61165168988839 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Generate SQL Query", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": 
"Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + 
"optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + 
"loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatAnthropic", + "llmMessages": [ + { + "role": "system", + "content": "

You are an agent designed to interact with a SQL database. Given an input question, create a syntactically correct sqlite query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 5 results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for the relevant columns given the question. DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.

Here is the relevant table info:

{{ customFunctionAgentflow_0 }}

Note:

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "sql_query", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "SQL query" + } + ], + "llmUpdateState": [ + { + "key": "sqlQuery", + "value": "

{{ output.sql_query }}

" + } + ], + "llmModelConfig": { + "credential": "", + "modelName": "claude-sonnet-4-0", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "llmModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 213, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 272.7184381707814, + "y": 106.61165168988839 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": 511.16504493033483, + "y": 101.98220225318451 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Check SQL Query", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": [ + "ConditionAgent" + ], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "SQL query is correct and does not contains mistakes" + }, + { + "scenario": "SQL query contains mistakes" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

You are a SQL expert with a strong attention to detail. Double check the SQL query for common mistakes, including:

- Using NOT IN with NULL values

- Using UNION when UNION ALL should have been used

- Using BETWEEN for exclusive ranges

- Data type mismatch in predicates

- Properly quoting identifiers

- Using the correct number of arguments for functions

- Casting to the correct data type

- Using the proper columns for joins

", + "conditionAgentInput": "

{{ $flow.state.sqlQuery }}

", + "conditionAgentScenarios": [ + { + "scenario": "SQL query is correct and does not contains mistakes" + }, + { + "scenario": "SQL query contains mistakes" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 187, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 511.16504493033483, + "y": 101.98220225318451 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 762.44734302386, + "y": 182.95996068910745 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Regenerate Query", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + 
"inputs": { + "loopBackToNode": "llmAgentflow_0-Generate SQL Query", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 762.44734302386, + "y": 182.95996068910745 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_1", + "position": { + "x": 761.3261621815544, + "y": 44.65096212173265 + }, + "data": { + "id": "customFunctionAgentflow_1", + "label": "Run SQL Query", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_1-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Schema as variables. 
Ex: Property = userid, Variable = $userid\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get custom variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_1-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_1-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "sqlQuery", + "variableValue": "

{{ $flow.state.sqlQuery }}

" + } + ], + "customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\n\n// Configuration\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nconst sqlQuery = $sqlQuery;\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nlet formattedResult = '';\n\nasync function runSQLQuery(query) {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n const rows = await queryRunner.query(query);\n console.log('rows =', rows);\n\n if (rows.length === 0) {\n formattedResult = '[No results returned]';\n } else {\n const columnNames = Object.keys(rows[0]);\n const header = columnNames.join(' ');\n const values = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n\n formattedResult = query + '\\n' + header + '\\n' + values.join('\\n');\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error('[ERROR]', err);\n formattedResult = `[Error executing query]: ${err}`;\n }\n\n return formattedResult;\n}\n\nasync function main() {\n formattedResult = await runSQLQuery(sqlQuery);\n}\n\nawait main();\n\nreturn formattedResult;\n", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_1-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 171, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 761.3261621815544, + "y": 44.65096212173265 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 1238.7660285501179, + "y": 20.56658816269558 + }, + "data": { + "id": "llmAgentflow_1", + "label": "Return Response", + "version": 1, + "name": "llmAgentflow", + 
"type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatGoogleGenerativeAI", + "llmMessages": [], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

{{ customFunctionAgentflow_1 }}

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "llmModel": "chatGoogleGenerativeAI" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 1238.7660285501179, + "y": 20.56658816269558 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_1", + "position": { + "x": 966.5436041632489, + "y": 57.77868724229256 + }, + "data": { + "id": "conditionAgentAgentflow_1", + "label": "Check Result", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": [ + "ConditionAgent" + ], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_1-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_1-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used 
for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_1-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "Result is correct and does not contains error" + }, + { + "scenario": "Result query contains error" + } + ], + "id": "conditionAgentAgentflow_1-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatMistralAI", + "conditionAgentInstructions": "

You are a SQL expert. Check if the query result is correct or contains error.

", + "conditionAgentInput": "

{{ customFunctionAgentflow_1 }}

", + "conditionAgentScenarios": [ + { + "scenario": "Result is correct and does not contains error" + }, + { + "scenario": "Result query contains error" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "mistral-medium-latest", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "randomSeed": "", + "safeMode": "", + "overrideEndpoint": "", + "conditionAgentModel": "chatMistralAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_1-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_1-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 228, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 966.5436041632489, + "y": 57.77868724229256 + }, + "dragging": false + }, + { + "id": "loopAgentflow_1", + "position": { + "x": 1501.0055934843515, + "y": 140.83809747682727 + }, + "data": { + "id": "loopAgentflow_1", + "label": "Recheck SQL Query", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_1-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_1-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "conditionAgentAgentflow_0-Check SQL Query", + "maxLoopCount": 5, + "undefined": "" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + 
"type": "agentFlow", + "width": 202, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1501.0055934843515, + "y": 140.83809747682727 + }, + "dragging": false + }, + { + "id": "llmAgentflow_2", + "position": { + "x": 1235.4868883628933, + "y": 137.82100195002667 + }, + "data": { + "id": "llmAgentflow_2", + "label": "Regenerate SQL Query", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_2-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_2-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_2-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + 
"description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_2-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_2-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_2-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_2-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_2-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatAnthropic", + "llmMessages": [ + { + "role": "system", + "content": "

You are an agent designed to interact with a SQL database. Given an input question, create a syntactically correct sqlite query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 5 results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for the relevant columns given the question. DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.

Here is the relevant table info:

{{ customFunctionAgentflow_0 }}

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Given the generated SQL Query: {{ $flow.state.sqlQuery }}

I have the following error: {{ customFunctionAgentflow_1 }}

Regenerate a new SQL Query that will fix the error

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "sql_query", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "SQL query" + } + ], + "llmUpdateState": [ + { + "key": "sqlQuery", + "value": "

{{ output.sql_query }}

" + } + ], + "llmModelConfig": { + "credential": "", + "modelName": "claude-sonnet-4-0", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "llmModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_2-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 220, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 1235.4868883628933, + "y": 137.82100195002667 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": 973.4435331695138, + "y": 156.551869199512 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "This is an auto correct mechanism that regenerate sql query if result contains error" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 210, + "height": 123, + "selected": false, + "positionAbsolute": { + "x": 973.4435331695138, + "y": 156.551869199512 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_1", + "position": { + "x": 514.8377809033279, + "y": 200.97994630025966 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": 
"stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Check if generated SQL query contains errors/mistakes, if yes - regenerate" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 210, + "height": 123, + "selected": false, + "positionAbsolute": { + "x": 514.8377809033279, + "y": 200.97994630025966 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_2", + "position": { + "x": 40.21835449345774, + "y": 6.978337213146034 + }, + "data": { + "id": "stickyNoteAgentflow_2", + "label": "Sticky Note (1) (2)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_2-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Retrieve database schema" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_2-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 210, + "height": 82, + "selected": false, + "positionAbsolute": { + "x": 40.21835449345774, + "y": 6.978337213146034 + }, + "dragging": false + } + 
], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "customFunctionAgentflow_0", + "targetHandle": "customFunctionAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-customFunctionAgentflow_0-customFunctionAgentflow_0" + }, + { + "source": "customFunctionAgentflow_0", + "sourceHandle": "customFunctionAgentflow_0-output-customFunctionAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_0-customFunctionAgentflow_0-output-customFunctionAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "customFunctionAgentflow_1", + "targetHandle": "customFunctionAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#E4B7FF", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-customFunctionAgentflow_1-customFunctionAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#FFA07A", + 
"edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "customFunctionAgentflow_1", + "sourceHandle": "customFunctionAgentflow_1-output-customFunctionAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": "conditionAgentAgentflow_1", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_1-customFunctionAgentflow_1-output-customFunctionAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-0", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-0-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-1", + "target": "llmAgentflow_2", + "targetHandle": "llmAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-1-llmAgentflow_2-llmAgentflow_2" + }, + { + "source": "llmAgentflow_2", + "sourceHandle": "llmAgentflow_2-output-llmAgentflow", + "target": "loopAgentflow_1", + "targetHandle": "loopAgentflow_1", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_2-llmAgentflow_2-output-llmAgentflow-loopAgentflow_1-loopAgentflow_1" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/SQL Chatflow (1).json b/fr/.gitbook/assets/SQL Chatflow (1).json new file 
mode 100644 index 00000000..89bb5965 --- /dev/null +++ b/fr/.gitbook/assets/SQL Chatflow (1).json @@ -0,0 +1,2087 @@ +{ + "nodes": [ + { + "width": 300, + "height": 511, + "id": "promptTemplate_0", + "position": { + "x": 384.4880563109088, + "y": 253.48974179902635 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_0", + "label": "Prompt Template", + "version": 1, + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": [ + "PromptTemplate", + "BaseStringPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_0-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_0-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "template": "You are a MySQL expert. 
Given an input question, create a syntactically correct MySQL query to run.\nUnless otherwise specified, do not return more than {topK} rows.\n\nHere is the relevant table info:\n{schema}\n\nBelow are a number of examples of questions and their corresponding SQL queries.\n\nUser input: List all artists.\nSQL Query: SELECT * FROM Artist;\n\nUser input: Find all albums for the artist 'AC/DC'.\nSQL Query: SELECT * FROM Album WHERE ArtistId = (SELECT ArtistId FROM Artist WHERE Name = 'AC/DC');\n\nUser input: List all tracks in the 'Rock' genre.\nSQL Query: SELECT * FROM Track WHERE GenreId = (SELECT GenreId FROM Genre WHERE Name = 'Rock');\n\nUser input: Find the total duration of all tracks.\nSQL Query: SELECT SUM(Milliseconds) FROM Track;\n\nUser input: List all customers from Canada.\nSQL Query: SELECT * FROM Customer WHERE Country = 'Canada';\n\nUser input: {question}\nSQL query:", + "promptValues": "{\"schema\":\"{{customFunction_2.data.instance}}\",\"question\":\"{{question}}\",\"topK\":3}" + }, + "outputAnchors": [ + { + "id": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 384.4880563109088, + "y": 253.48974179902635 + }, + "dragging": false + }, + { + "width": 300, + "height": 507, + "id": "llmChain_0", + "position": { + "x": 770.4559230968546, + "y": -127.11351409346554 + }, + "type": "customNode", + "data": { + "id": "llmChain_0", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + 
"placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_0-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_0-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_0-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + "id": "llmChain_0-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_0.data.instance}}", + "prompt": "{{promptTemplate_0.data.instance}}", + "outputParser": "", + "inputModeration": "", + "chainName": "SQL Query Chain" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_0-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "outputPrediction" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 770.4559230968546, + "y": -127.11351409346554 + }, + "dragging": false + }, + { + "width": 300, + "height": 669, + "id": "chatOpenAI_0", + "position": { + "x": 376.92707114970364, + "y": -666.8088336865496 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 6, + "name": 
"chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "gpt-3.5-turbo", + "id": "chatOpenAI_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": 
"baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 376.92707114970364, + "y": -666.8088336865496 + }, + "dragging": false + }, + { + "width": 300, + "height": 669, + "id": "chatOpenAI_1", + "position": { + "x": 2653.726672579251, + "y": -665.8849139437705 + }, + "type": "customNode", + 
"data": { + "id": "chatOpenAI_1", + "label": "ChatOpenAI", + "version": 6, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "gpt-3.5-turbo", + "id": "chatOpenAI_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": 
"chatOpenAI_1-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_1-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 2653.726672579251, + "y": -665.8849139437705 + }, + "dragging": false + }, + { + "width": 300, + "height": 507, + "id": "llmChain_1", + "position": { + "x": 
3089.9937691022837, + "y": -109.24001734925716 + }, + "type": "customNode", + "data": { + "id": "llmChain_1", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_1-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_1-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_1-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + "id": "llmChain_1-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_1-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_1.data.instance}}", + "prompt": "{{promptTemplate_1.data.instance}}", + "outputParser": "", + "inputModeration": "", + "chainName": "Final Chain" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_1-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_1-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": 
"llmChain" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 3089.9937691022837, + "y": -109.24001734925716 + }, + "dragging": false + }, + { + "width": 300, + "height": 674, + "id": "customFunction_2", + "position": { + "x": -19.95227863012829, + "y": -125.50600296188355 + }, + "type": "customNode", + "data": { + "id": "customFunction_2", + "label": "Custom JS Function", + "version": 2, + "name": "customFunction", + "type": "CustomFunction", + "baseClasses": [ + "CustomFunction", + "Utilities" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Execute custom javascript function", + "inputParams": [ + { + "label": "Input Variables", + "name": "functionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $var", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "customFunction_2-input-functionInputVariables-json" + }, + { + "label": "Function Name", + "name": "functionName", + "type": "string", + "optional": true, + "placeholder": "My Function", + "id": "customFunction_2-input-functionName-string" + }, + { + "label": "Javascript Function", + "name": "javascriptFunction", + "type": "code", + "id": "customFunction_2-input-javascriptFunction-code" + } + ], + "inputAnchors": [], + "inputs": { + "functionInputVariables": "", + "functionName": "Get SQL Schema Prompt", + "javascriptFunction": "const HOST = 'singlestore-host.com';\nconst USER = 'admin';\nconst PASSWORD = 'mypassword';\nconst DATABASE = 'mydb';\nconst TABLE = 'samples';\nconst mysql = require('mysql2/promise');\n\nlet sqlSchemaPrompt;\n\n/**\n * Ideal prompt contains schema info and examples\n * Follows best practices as specified form https://arxiv.org/abs/2204.00498\n * =========================================\n * CREATE TABLE samples (firstName varchar NOT NULL, lastName varchar)\n * SELECT * FROM samples LIMIT 3\n * firstName lastName\n * Stephen 
Tyler\n * Jack McGinnis\n * Steven Repici\n * =========================================\n*/\nfunction getSQLPrompt() {\n return new Promise(async (resolve, reject) => {\n try {\n const singleStoreConnection = mysql.createPool({\n host: HOST,\n user: USER,\n password: PASSWORD,\n database: DATABASE,\n });\n \n // Get schema info\n const [schemaInfo] = await singleStoreConnection.execute(\n `SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = \"${TABLE}\"`\n );\n \n const createColumns = [];\n const columnNames = [];\n \n for (const schemaData of schemaInfo) {\n columnNames.push(`${schemaData['COLUMN_NAME']}`);\n createColumns.push(`${schemaData['COLUMN_NAME']} ${schemaData['COLUMN_TYPE']} ${schemaData['IS_NULLABLE'] === 'NO' ? 'NOT NULL' : ''}`);\n }\n \n const sqlCreateTableQuery = `CREATE TABLE samples (${createColumns.join(', ')})`;\n const sqlSelectTableQuery = `SELECT * FROM samples LIMIT 3`;\n \n // Get first 3 rows\n const [rows] = await singleStoreConnection.execute(\n sqlSelectTableQuery,\n );\n \n const allValues = [];\n for (const row of rows) {\n const rowValues = [];\n for (const colName in row) {\n rowValues.push(row[colName]);\n }\n allValues.push(rowValues.join(' '));\n }\n \n sqlSchemaPrompt = sqlCreateTableQuery + '\\n' + sqlSelectTableQuery + '\\n' + columnNames.join(' ') + '\\n' + allValues.join('\\n');\n \n resolve();\n } catch (e) {\n console.error(e);\n return reject(e);\n }\n });\n}\n\nasync function main() {\n await getSQLPrompt();\n}\n\nawait main();\n\nreturn sqlSchemaPrompt;" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "customFunction_2-output-output-string|number|boolean|json|array", + "name": "output", + "label": "Output", + "description": "", + "type": "string | number | boolean | json | array" + }, + { + "id": "customFunction_2-output-EndingNode-CustomFunction", + "name": "EndingNode", + "label": "Ending Node", + "description": "", 
+ "type": "CustomFunction" + } + ], + "default": "output" + } + ], + "outputs": { + "output": "output" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -19.95227863012829, + "y": -125.50600296188355 + }, + "dragging": false + }, + { + "width": 300, + "height": 674, + "id": "customFunction_1", + "position": { + "x": 1887.4670208331604, + "y": -275.95340782935716 + }, + "type": "customNode", + "data": { + "id": "customFunction_1", + "label": "Custom JS Function", + "version": 2, + "name": "customFunction", + "type": "CustomFunction", + "baseClasses": [ + "CustomFunction", + "Utilities" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Execute custom javascript function", + "inputParams": [ + { + "label": "Input Variables", + "name": "functionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $var", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "customFunction_1-input-functionInputVariables-json" + }, + { + "label": "Function Name", + "name": "functionName", + "type": "string", + "optional": true, + "placeholder": "My Function", + "id": "customFunction_1-input-functionName-string" + }, + { + "label": "Javascript Function", + "name": "javascriptFunction", + "type": "code", + "id": "customFunction_1-input-javascriptFunction-code" + } + ], + "inputAnchors": [], + "inputs": { + "functionInputVariables": "{\"sqlQuery\":\"{{setVariable_1.data.instance}}\"}", + "functionName": "Run SQL Query", + "javascriptFunction": "const HOST = 'singlestore-host.com';\nconst USER = 'admin';\nconst PASSWORD = 'mypassword';\nconst DATABASE = 'mydb';\nconst TABLE = 'samples';\nconst mysql = require('mysql2/promise');\n\nlet result;\n\nfunction getSQLResult() {\n return new Promise(async (resolve, reject) => {\n try {\n const singleStoreConnection = mysql.createPool({\n host: HOST,\n user: USER,\n password: PASSWORD,\n database: 
DATABASE,\n });\n \n const [rows] = await singleStoreConnection.execute(\n $sqlQuery\n );\n \n result = JSON.stringify(rows)\n \n resolve();\n } catch (e) {\n console.error(e);\n return reject(e);\n }\n });\n}\n\nasync function main() {\n await getSQLResult();\n}\n\nawait main();\n\nreturn result;" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "customFunction_1-output-output-string|number|boolean|json|array", + "name": "output", + "label": "Output", + "description": "", + "type": "string | number | boolean | json | array" + }, + { + "id": "customFunction_1-output-EndingNode-CustomFunction", + "name": "EndingNode", + "label": "Ending Node", + "description": "", + "type": "CustomFunction" + } + ], + "default": "output" + } + ], + "outputs": { + "output": "output" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1887.4670208331604, + "y": -275.95340782935716 + }, + "dragging": false + }, + { + "width": 300, + "height": 511, + "id": "promptTemplate_1", + "position": { + "x": 2655.2632506040304, + "y": 218.145615216618 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_1", + "label": "Prompt Template", + "version": 1, + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": [ + "PromptTemplate", + "BaseStringPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_1-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_1-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": 
{ + "template": "Given the following user question, corresponding SQL query, and SQL result, answer the user question as details as possible.\n\nQuestion: {question}\n\nSQL Query: {sqlQuery}\n\nSQL Result: {sqlResponse}\n\nAnswer:\n", + "promptValues": "{\"question\":\"{{question}}\",\"sqlResponse\":\"{{customFunction_1.data.instance}}\",\"sqlQuery\":\"{{getVariable_1.data.instance}}\"}" + }, + "outputAnchors": [ + { + "id": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 2655.2632506040304, + "y": 218.145615216618 + } + }, + { + "width": 300, + "height": 304, + "id": "getVariable_1", + "position": { + "x": 2272.8555266616872, + "y": 24.11364076336241 + }, + "type": "customNode", + "data": { + "id": "getVariable_1", + "label": "Get Variable", + "version": 2, + "name": "getVariable", + "type": "GetVariable", + "baseClasses": [ + "GetVariable", + "Utilities" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Get variable that was saved using Set Variable node", + "inputParams": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string", + "placeholder": "var1", + "id": "getVariable_1-input-variableName-string" + } + ], + "inputAnchors": [], + "inputs": { + "variableName": "sqlQuery" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "getVariable_1-output-output-string|number|boolean|json|array", + "name": "output", + "label": "Output", + "description": "", + "type": "string | number | boolean | json | array" + } + ], + "default": "output" + } + ], + "outputs": { + "output": "output" + }, + "selected": false 
+ }, + "positionAbsolute": { + "x": 2272.8555266616872, + "y": 24.11364076336241 + }, + "selected": false, + "dragging": false + }, + { + "width": 300, + "height": 355, + "id": "setVariable_1", + "position": { + "x": 1516.338224315744, + "y": -133.6986023683283 + }, + "type": "customNode", + "data": { + "id": "setVariable_1", + "label": "Set Variable", + "version": 2, + "name": "setVariable", + "type": "SetVariable", + "baseClasses": [ + "SetVariable", + "Utilities" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Set variable which can be retrieved at a later stage. Variable is only available during runtime.", + "inputParams": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string", + "placeholder": "var1", + "id": "setVariable_1-input-variableName-string" + } + ], + "inputAnchors": [ + { + "label": "Input", + "name": "input", + "type": "string | number | boolean | json | array", + "optional": true, + "list": true, + "id": "setVariable_1-input-input-string | number | boolean | json | array" + } + ], + "inputs": { + "input": [ + "{{ifElseFunction_0.data.instance}}" + ], + "variableName": "sqlQuery" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "setVariable_1-output-output-string|number|boolean|json|array", + "name": "output", + "label": "Output", + "description": "", + "type": "string | number | boolean | json | array" + } + ], + "default": "output" + } + ], + "outputs": { + "output": "output" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1516.338224315744, + "y": -133.6986023683283 + }, + "dragging": false + }, + { + "width": 300, + "height": 765, + "id": "ifElseFunction_0", + "position": { + "x": 1147.8020838770517, + "y": -237.39478763322148 + }, + "type": "customNode", + "data": { + "id": "ifElseFunction_0", + "label": "IfElse Function", + "version": 2, + "name": 
"ifElseFunction", + "type": "IfElseFunction", + "baseClasses": [ + "IfElseFunction", + "Utilities" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Split flows based on If Else javascript functions", + "inputParams": [ + { + "label": "Input Variables", + "name": "functionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $var", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "ifElseFunction_0-input-functionInputVariables-json" + }, + { + "label": "IfElse Name", + "name": "functionName", + "type": "string", + "optional": true, + "placeholder": "If Condition Match", + "id": "ifElseFunction_0-input-functionName-string" + }, + { + "label": "If Function", + "name": "ifFunction", + "description": "Function must return a value", + "type": "code", + "rows": 2, + "default": "if (\"hello\" == \"hello\") {\n return true;\n}", + "id": "ifElseFunction_0-input-ifFunction-code" + }, + { + "label": "Else Function", + "name": "elseFunction", + "description": "Function must return a value", + "type": "code", + "rows": 2, + "default": "return false;", + "id": "ifElseFunction_0-input-elseFunction-code" + } + ], + "inputAnchors": [], + "inputs": { + "functionInputVariables": "{\"sqlQuery\":\"{{llmChain_0.data.instance}}\"}", + "functionName": "IF SQL Query contains SELECT and WHERE", + "ifFunction": "const sqlQuery = $sqlQuery.trim();\n\nconst regex = /SELECT\\s.*?(?:\\n|$)/gi;\n\n// Extracting the SQL part\nconst matches = sqlQuery.match(regex);\nconst cleanSql = matches ? 
matches[0].trim() : \"\";\n\nif (cleanSql.includes(\"SELECT\") && cleanSql.includes(\"WHERE\")) {\n return cleanSql;\n}", + "elseFunction": "return $sqlQuery;" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "ifElseFunction_0-output-returnTrue-string|number|boolean|json|array", + "name": "returnTrue", + "label": "True", + "description": "", + "type": "string | number | boolean | json | array" + }, + { + "id": "ifElseFunction_0-output-returnFalse-string|number|boolean|json|array", + "name": "returnFalse", + "label": "False", + "description": "", + "type": "string | number | boolean | json | array" + } + ], + "default": "returnTrue" + } + ], + "outputs": { + "output": "returnTrue" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1147.8020838770517, + "y": -237.39478763322148 + }, + "dragging": false + }, + { + "width": 300, + "height": 511, + "id": "promptTemplate_2", + "position": { + "x": 1193.7489579044463, + "y": 615.4009446588724 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_2", + "label": "Prompt Template", + "version": 1, + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": [ + "PromptTemplate", + "BaseStringPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_2-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_2-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "template": "Politely say \"I'm not able to answer query\"", + "promptValues": 
"{\"schema\":\"{{setVariable_0.data.instance}}\",\"question\":\"{{question}}\"}" + }, + "outputAnchors": [ + { + "id": "promptTemplate_2-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1193.7489579044463, + "y": 615.4009446588724 + }, + "dragging": false + }, + { + "width": 300, + "height": 669, + "id": "chatOpenAI_2", + "position": { + "x": 1545.1023725538003, + "y": 493.5495798408175 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_2", + "label": "ChatOpenAI", + "version": 6, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "gpt-3.5-turbo", + "id": "chatOpenAI_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-topP-number" + }, + { + 
"label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_2-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0.7", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": true, + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1545.1023725538003, + "y": 493.5495798408175 + }, + "dragging": false + }, + { + "width": 300, + "height": 507, + "id": "llmChain_2", + "position": { + "x": 1914.509823868027, + "y": 622.3435967391327 + }, + "type": "customNode", + "data": { + "id": "llmChain_2", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": 
"chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_2-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_2-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_2-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + "id": "llmChain_2-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_2-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_2.data.instance}}", + "prompt": "{{promptTemplate_2.data.instance}}", + "outputParser": "", + "inputModeration": "", + "chainName": "Fallback Chain" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_2-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_2-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "llmChain" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1914.509823868027, + "y": 622.3435967391327 + }, + "dragging": false + }, + { + "id": "stickyNote_0", + "position": { + "x": -18.950231412347364, + "y": -192.2980180516393 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_0", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + 
"type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_0-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "First, get SQL database schema" + }, + "outputAnchors": [ + { + "id": "stickyNote_0-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 42, + "selected": false, + "positionAbsolute": { + "x": -18.950231412347364, + "y": -192.2980180516393 + }, + "dragging": false + }, + { + "id": "stickyNote_1", + "position": { + "x": 1510.6324834799852, + "y": -221.78240261184442 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_1", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_1-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Save as variable to be used at the last Prompt Template" + }, + "outputAnchors": [ + { + "id": "stickyNote_1-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 62, + "selected": false, + "positionAbsolute": { + "x": 1510.6324834799852, + "y": -221.78240261184442 + }, + "dragging": false + }, + { + "id": "stickyNote_2", + "position": { + "x": 
386.88037412001086, + "y": 47.66735767574478 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_2", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_2-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Instruct LLM to return a SQL query using the schema.\n\nRecommend to give few examples for higher accuracy. \n\nChange the prompt accordingly to suit the type of database you are using" + }, + "outputAnchors": [ + { + "id": "stickyNote_2-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 183, + "selected": false, + "positionAbsolute": { + "x": 386.88037412001086, + "y": 47.66735767574478 + }, + "dragging": false + }, + { + "id": "stickyNote_3", + "position": { + "x": 1148.366177280569, + "y": -330.2148999791981 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_3", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_3-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Check if SQL Query is valid\n\nIf not, avoid executing it and return to user " + }, + "outputAnchors": [ + { + "id": "stickyNote_3-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": 
"StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 82, + "selected": false, + "positionAbsolute": { + "x": 1148.366177280569, + "y": -330.2148999791981 + }, + "dragging": false + }, + { + "id": "stickyNote_4", + "position": { + "x": 1881.2554569013519, + "y": -435.79147130381756 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_4", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_4-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Execute the SQL query after validated, and get the list of results back.\n\nTo avoid long list of results overflowing token limit, try capping the length of result here" + }, + "outputAnchors": [ + { + "id": "stickyNote_4-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 143, + "selected": false, + "positionAbsolute": { + "x": 1881.2554569013519, + "y": -435.79147130381756 + }, + "dragging": false + }, + { + "id": "stickyNote_5", + "position": { + "x": 1545.0242031958799, + "y": 428.37859733277077 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_5", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type 
something here", + "optional": true, + "id": "stickyNote_5-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Fallback answer if SQL query is not valid" + }, + "outputAnchors": [ + { + "id": "stickyNote_5-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 42, + "selected": false, + "positionAbsolute": { + "x": 1545.0242031958799, + "y": 428.37859733277077 + }, + "dragging": false + }, + { + "id": "stickyNote_6", + "position": { + "x": 2653.037036258241, + "y": 53.55638699917168 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_6", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_6-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "This is the final prompt.\n\nCombine the following:\nQuestion + SQL query + SQL result\n\nto generate a final answer" + }, + "outputAnchors": [ + { + "id": "stickyNote_6-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 143, + "selected": false, + "positionAbsolute": { + "x": 2653.037036258241, + "y": 53.55638699917168 + }, + "dragging": false + }, + { + "id": "stickyNote_7", + "position": { + "x": 2267.355938520518, + "y": -56.64296923028309 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_7", + "label": "Sticky Note", + "version": 2, + "name": "stickyNote", + "type": "StickyNote", + 
"baseClasses": [ + "StickyNote" + ], + "tags": [ + "Utilities" + ], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_7-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Get the saved variable value to be used in prompt" + }, + "outputAnchors": [ + { + "id": "stickyNote_7-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "description": "Add a sticky note", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 62, + "selected": false, + "positionAbsolute": { + "x": 2267.355938520518, + "y": -56.64296923028309 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "promptTemplate_0", + "sourceHandle": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "promptTemplate_0-promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_1", + "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_1", + "targetHandle": 
"llmChain_1-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_1-llmChain_1-input-model-BaseLanguageModel", + "data": { + "label": "" + } + }, + { + "source": "customFunction_1", + "sourceHandle": "customFunction_1-output-output-string|number|boolean|json|array", + "target": "promptTemplate_1", + "targetHandle": "promptTemplate_1-input-promptValues-json", + "type": "buttonedge", + "id": "customFunction_1-customFunction_1-output-output-string|number|boolean|json|array-promptTemplate_1-promptTemplate_1-input-promptValues-json", + "data": { + "label": "" + } + }, + { + "source": "promptTemplate_1", + "sourceHandle": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_1", + "targetHandle": "llmChain_1-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "promptTemplate_1-promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_1-llmChain_1-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "getVariable_1", + "sourceHandle": "getVariable_1-output-output-string|number|boolean|json|array", + "target": "promptTemplate_1", + "targetHandle": "promptTemplate_1-input-promptValues-json", + "type": "buttonedge", + "id": "getVariable_1-getVariable_1-output-output-string|number|boolean|json|array-promptTemplate_1-promptTemplate_1-input-promptValues-json", + "data": { + "label": "" + } + }, + { + "source": "setVariable_1", + "sourceHandle": "setVariable_1-output-output-string|number|boolean|json|array", + "target": "customFunction_1", + "targetHandle": "customFunction_1-input-functionInputVariables-json", + "type": "buttonedge", + "id": "setVariable_1-setVariable_1-output-output-string|number|boolean|json|array-customFunction_1-customFunction_1-input-functionInputVariables-json", + 
"data": { + "label": "" + } + }, + { + "source": "llmChain_0", + "sourceHandle": "llmChain_0-output-outputPrediction-string|json", + "target": "ifElseFunction_0", + "targetHandle": "ifElseFunction_0-input-functionInputVariables-json", + "type": "buttonedge", + "id": "llmChain_0-llmChain_0-output-outputPrediction-string|json-ifElseFunction_0-ifElseFunction_0-input-functionInputVariables-json" + }, + { + "source": "ifElseFunction_0", + "sourceHandle": "ifElseFunction_0-output-returnTrue-string|number|boolean|json|array", + "target": "setVariable_1", + "targetHandle": "setVariable_1-input-input-string | number | boolean | json | array", + "type": "buttonedge", + "id": "ifElseFunction_0-ifElseFunction_0-output-returnTrue-string|number|boolean|json|array-setVariable_1-setVariable_1-input-input-string | number | boolean | json | array" + }, + { + "source": "ifElseFunction_0", + "sourceHandle": "ifElseFunction_0-output-returnFalse-string|number|boolean|json|array", + "target": "promptTemplate_2", + "targetHandle": "promptTemplate_2-input-promptValues-json", + "type": "buttonedge", + "id": "ifElseFunction_0-ifElseFunction_0-output-returnFalse-string|number|boolean|json|array-promptTemplate_2-promptTemplate_2-input-promptValues-json" + }, + { + "source": "chatOpenAI_2", + "sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_2", + "targetHandle": "llmChain_2-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_2-llmChain_2-input-model-BaseLanguageModel" + }, + { + "source": "promptTemplate_2", + "sourceHandle": "promptTemplate_2-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_2", + "targetHandle": "llmChain_2-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": 
"promptTemplate_2-promptTemplate_2-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_2-llmChain_2-input-prompt-BasePromptTemplate" + }, + { + "source": "customFunction_2", + "sourceHandle": "customFunction_2-output-output-string|number|boolean|json|array", + "target": "promptTemplate_0", + "targetHandle": "promptTemplate_0-input-promptValues-json", + "type": "buttonedge", + "id": "customFunction_2-customFunction_2-output-output-string|number|boolean|json|array-promptTemplate_0-promptTemplate_0-input-promptValues-json" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/SQL Chatflow.json b/fr/.gitbook/assets/SQL Chatflow.json new file mode 100644 index 00000000..3ce78590 --- /dev/null +++ b/fr/.gitbook/assets/SQL Chatflow.json @@ -0,0 +1,1614 @@ +{ + "nodes": [ + { + "width": 300, + "height": 779, + "id": "promptTemplate_0", + "position": { + "x": 379.11224395092825, + "y": 71.33564730890853 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_0", + "label": "Prompt Template", + "version": 1, + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": [ + "PromptTemplate", + "BaseStringPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_0-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_0-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "template": "Based on the provided SQL table schema and question below, return a SQL SELECT ALL query that would answer the user's question. 
For example: SELECT * FROM table WHERE id = '1'.\n------------\nSCHEMA: {schema}\n------------\nQUESTION: {question}\n------------\nSQL QUERY:", + "promptValues": "{\"schema\":\"{{customFunction_2.data.instance}}\",\"question\":\"{{question}}\"}" + }, + "outputAnchors": [ + { + "id": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 379.11224395092825, + "y": 71.33564730890853 + }, + "dragging": false + }, + { + "width": 300, + "height": 506, + "id": "llmChain_0", + "position": { + "x": 770.4559230968546, + "y": -127.11351409346554 + }, + "type": "customNode", + "data": { + "id": "llmChain_0", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_0-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_0-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_0-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + "id": "llmChain_0-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": 
"Moderation", + "optional": true, + "list": true, + "id": "llmChain_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_1.data.instance}}", + "prompt": "{{promptTemplate_0.data.instance}}", + "outputParser": "", + "inputModeration": "", + "chainName": "SQL Query Chain" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_0-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "outputPrediction" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 770.4559230968546, + "y": -127.11351409346554 + }, + "dragging": false + }, + { + "width": 300, + "height": 506, + "id": "llmChain_1", + "position": { + "x": 2330.1281944523407, + "y": -325.61633963017937 + }, + "type": "customNode", + "data": { + "id": "llmChain_1", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_1-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_1-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_1-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + 
"id": "llmChain_1-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_1-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_3.data.instance}}", + "prompt": "{{promptTemplate_1.data.instance}}", + "outputParser": "", + "inputModeration": "", + "chainName": "Final Chain" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_1-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_1-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "llmChain" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 2330.1281944523407, + "y": -325.61633963017937 + }, + "dragging": false + }, + { + "width": 300, + "height": 668, + "id": "customFunction_2", + "position": { + "x": -12.41348902418423, + "y": -198.08456515277106 + }, + "type": "customNode", + "data": { + "id": "customFunction_2", + "label": "Custom JS Function", + "version": 1, + "name": "customFunction", + "type": "CustomFunction", + "baseClasses": [ + "CustomFunction", + "Utilities" + ], + "category": "Utilities", + "description": "Execute custom javascript function", + "inputParams": [ + { + "label": "Input Variables", + "name": "functionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $var", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "customFunction_2-input-functionInputVariables-json" + }, + { + "label": "Function Name", + "name": "functionName", + "type": "string", + "placeholder": "My Function", + "id": "customFunction_2-input-functionName-string" + }, + { + "label": "Javascript Function", + "name": "javascriptFunction", + "type": "code", + "id": "customFunction_2-input-javascriptFunction-code" + } + ], + "inputAnchors": [], + "inputs": { + "functionInputVariables": "", + "functionName": "Get SQL Schema Prompt", + "javascriptFunction": "const HOST = 'svc-abc.aws-oregon-3.svc.singlestore.com';\nconst USER = 'admin';\nconst PASSWORD = '123';\nconst DATABASE = 'mydb';\nconst TABLE = 'samples';\nconst mysql = require('mysql2/promise');\n\nlet sqlSchemaPrompt;\n\n/**\n * Ideal prompt contains schema info and examples\n * Follows best practices as specified form https://arxiv.org/abs/2204.00498\n * =========================================\n * CREATE TABLE samples (firstName varchar NOT NULL, lastName varchar)\n * SELECT * FROM samples LIMIT 3\n * firstName lastName\n * Stephen Tyler\n * Jack McGinnis\n * Steven Repici\n * =========================================\n*/\nfunction getSQLPrompt() {\n return new Promise(async (resolve, reject) => {\n try {\n const singleStoreConnection = mysql.createPool({\n host: HOST,\n user: USER,\n password: PASSWORD,\n database: DATABASE,\n });\n \n // Get schema info\n const [schemaInfo] = await singleStoreConnection.execute(\n `SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = \"${TABLE}\"`\n );\n \n const createColumns = [];\n const columnNames = [];\n \n for (const schemaData of schemaInfo) {\n columnNames.push(`${schemaData['COLUMN_NAME']}`);\n createColumns.push(`${schemaData['COLUMN_NAME']} ${schemaData['COLUMN_TYPE']} ${schemaData['IS_NULLABLE'] === 'NO' ? 
'NOT NULL' : ''}`);\n }\n \n const sqlCreateTableQuery = `CREATE TABLE samples (${createColumns.join(', ')})`;\n const sqlSelectTableQuery = `SELECT * FROM samples LIMIT 3`;\n \n // Get first 3 rows\n const [rows] = await singleStoreConnection.execute(\n sqlSelectTableQuery,\n );\n \n const allValues = [];\n for (const row of rows) {\n const rowValues = [];\n for (const colName in row) {\n rowValues.push(row[colName]);\n }\n allValues.push(rowValues.join(' '));\n }\n \n sqlSchemaPrompt = sqlCreateTableQuery + '\\n' + sqlSelectTableQuery + '\\n' + columnNames.join(' ') + '\\n' + allValues.join('\\n');\n \n resolve();\n } catch (e) {\n console.error(e);\n return reject(e);\n }\n });\n}\n\nasync function main() {\n await getSQLPrompt();\n}\n\nawait main();\n\nreturn sqlSchemaPrompt;" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "customFunction_2-output-output-string|number|boolean|json|array", + "name": "output", + "label": "Output", + "type": "string | number | boolean | json | array" + } + ], + "default": "output" + } + ], + "outputs": { + "output": "output" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -12.41348902418423, + "y": -198.08456515277106 + }, + "dragging": false + }, + { + "width": 300, + "height": 668, + "id": "customFunction_1", + "position": { + "x": 1574.8757854291205, + "y": -510.612625067788 + }, + "type": "customNode", + "data": { + "id": "customFunction_1", + "label": "Custom JS Function", + "version": 1, + "name": "customFunction", + "type": "CustomFunction", + "baseClasses": [ + "CustomFunction", + "Utilities" + ], + "category": "Utilities", + "description": "Execute custom javascript function", + "inputParams": [ + { + "label": "Input Variables", + "name": "functionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $var", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "customFunction_1-input-functionInputVariables-json" + }, + { + "label": "Function Name", + "name": "functionName", + "type": "string", + "placeholder": "My Function", + "id": "customFunction_1-input-functionName-string" + }, + { + "label": "Javascript Function", + "name": "javascriptFunction", + "type": "code", + "id": "customFunction_1-input-javascriptFunction-code" + } + ], + "inputAnchors": [], + "inputs": { + "functionInputVariables": "{\"sqlQuery\":\"{{ifElseFunction_0.data.instance}}\"}", + "functionName": "Run SQL Query", + "javascriptFunction": "const HOST = 'svc-abc.aws-oregon-3.svc.singlestore.com';\nconst USER = 'admin';\nconst PASSWORD = '123';\nconst DATABASE = 'mydb';\nconst TABLE = 'samples';\nconst mysql = require('mysql2/promise');\n\nlet result;\n\nfunction getSQLResult() {\n return new Promise(async (resolve, reject) => {\n try {\n const singleStoreConnection = mysql.createPool({\n host: HOST,\n user: USER,\n password: PASSWORD,\n database: DATABASE,\n });\n \n const [rows] = await singleStoreConnection.execute(\n $sqlQuery\n );\n \n result = JSON.stringify(rows)\n \n resolve();\n } catch (e) {\n console.error(e);\n return reject(e);\n }\n });\n}\n\nasync function main() {\n await getSQLResult();\n}\n\nawait main();\n\nreturn result;" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "customFunction_1-output-output-string|number|boolean|json|array", + "name": "output", + "label": "Output", + "type": "string | number | boolean | json | array" + } + ], + "default": "output" + } + ], + "outputs": { + "output": "output" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1574.8757854291205, + "y": -510.612625067788 + }, + "dragging": false + }, + { + "width": 300, + "height": 779, + "id": "promptTemplate_1", + "position": { + "x": 
1943.5658568848553, + "y": -83.07909710675825 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_1", + "label": "Prompt Template", + "version": 1, + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": [ + "PromptTemplate", + "BaseStringPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_1-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_1-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "template": "Based on the question, and SQL response, write a natural language response, be details as possible:\n------------\nQUESTION: {question}\n------------\nSQL RESPONSE: {sqlResponse}\n------------\nNATURAL LANGUAGE RESPONSE:", + "promptValues": "{\"question\":\"{{question}}\",\"sqlResponse\":\"{{customFunction_1.data.instance}}\"}" + }, + "outputAnchors": [ + { + "id": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1943.5658568848553, + "y": -83.07909710675825 + } + }, + { + "width": 300, + "height": 754, + "id": "ifElseFunction_0", + "position": { + "x": 1217.9173937613534, + "y": -515.3650041424041 + }, + "type": "customNode", + "data": { + "id": "ifElseFunction_0", + "label": "IfElse Function", + "version": 1, + "name": "ifElseFunction", + "type": 
"IfElseFunction", + "baseClasses": [ + "IfElseFunction", + "Utilities" + ], + "category": "Utilities", + "description": "Split flows based on If Else javascript functions", + "inputParams": [ + { + "label": "Input Variables", + "name": "functionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $var", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "ifElseFunction_0-input-functionInputVariables-json" + }, + { + "label": "IfElse Name", + "name": "functionName", + "type": "string", + "optional": true, + "placeholder": "If Condition Match", + "id": "ifElseFunction_0-input-functionName-string" + }, + { + "label": "If Function", + "name": "ifFunction", + "description": "Function must return a value", + "type": "code", + "rows": 2, + "default": "if (\"hello\" == \"hello\") {\n return true;\n}", + "id": "ifElseFunction_0-input-ifFunction-code" + }, + { + "label": "Else Function", + "name": "elseFunction", + "description": "Function must return a value", + "type": "code", + "rows": 2, + "default": "return false;", + "id": "ifElseFunction_0-input-elseFunction-code" + } + ], + "inputAnchors": [], + "inputs": { + "functionInputVariables": "{\"sqlQuery\":\"{{llmChain_0.data.instance}}\"}", + "functionName": "IF SQL Query contains SELECT and WHERE", + "ifFunction": "const sqlQuery = $sqlQuery.trim();\n\nif (sqlQuery.includes(\"SELECT\") && sqlQuery.includes(\"WHERE\")) {\n return sqlQuery;\n}", + "elseFunction": "return $sqlQuery;" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "ifElseFunction_0-output-returnTrue-string|number|boolean|json|array", + "name": "returnTrue", + "label": "True", + "type": "string | number | boolean | json | array" + }, + { + "id": "ifElseFunction_0-output-returnFalse-string|number|boolean|json|array", + "name": "returnFalse", + "label": "False", + "type": "string | number | boolean | 
json | array" + } + ], + "default": "returnTrue" + } + ], + "outputs": { + "output": "returnTrue" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1217.9173937613534, + "y": -515.3650041424041 + }, + "dragging": false + }, + { + "width": 300, + "height": 779, + "id": "promptTemplate_2", + "position": { + "x": 1577.4729260684187, + "y": 887.7668360114285 + }, + "type": "customNode", + "data": { + "id": "promptTemplate_2", + "label": "Prompt Template", + "version": 1, + "name": "promptTemplate", + "type": "PromptTemplate", + "baseClasses": [ + "PromptTemplate", + "BaseStringPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], + "category": "Prompts", + "description": "Schema to represent a basic prompt for an LLM", + "inputParams": [ + { + "label": "Template", + "name": "template", + "type": "string", + "rows": 4, + "placeholder": "What is a good name for a company that makes {product}?", + "id": "promptTemplate_2-input-template-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "promptTemplate_2-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "template": "Politely say \"I'm not able to answer query\"", + "promptValues": "{\"schema\":\"{{setVariable_0.data.instance}}\",\"question\":\"{{question}}\"}" + }, + "outputAnchors": [ + { + "id": "promptTemplate_2-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "name": "promptTemplate", + "label": "PromptTemplate", + "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1577.4729260684187, + "y": 887.7668360114285 + }, + "dragging": false + }, + { + "width": 300, + "height": 506, + "id": "llmChain_2", + "position": { + "x": 1942.2473639184586, + "y": 534.2501352750406 + }, + 
"type": "customNode", + "data": { + "id": "llmChain_2", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_2-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_2-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_2-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + "optional": true, + "id": "llmChain_2-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_2-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_2.data.instance}}", + "prompt": "{{promptTemplate_2.data.instance}}", + "outputParser": "", + "inputModeration": "", + "chainName": "Fallback Chain" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_2-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_2-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "llmChain" + }, + "selected": false + }, + "selected": 
false, + "positionAbsolute": { + "x": 1942.2473639184586, + "y": 534.2501352750406 + }, + "dragging": false + }, + { + "id": "chatOpenAI_1", + "position": { + "x": 375.16318421173054, + "y": -645.2584301535801 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_1", + "label": "ChatOpenAI", + "version": 5, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-1106-vision-preview", + "name": "gpt-4-1106-vision-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": 
"chatOpenAI_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_1-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_1-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_1-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_1-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 670, + "selected": false, + "positionAbsolute": { + "x": 375.16318421173054, + "y": -645.2584301535801 + }, + "dragging": false + }, + { + "id": "chatOpenAI_3", + "position": { + "x": 1948.9511168108475, + "y": -796.7149375857242 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_3", + "label": "ChatOpenAI", + "version": 5, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat 
Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_3-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-1106-vision-preview", + "name": "gpt-4-1106-vision-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_3-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_3-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + 
"optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_3-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_3-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_3-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_3-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 670, + "selected": false, + "positionAbsolute": { + "x": 1948.9511168108475, + "y": -796.7149375857242 + }, + "dragging": false + }, + { + "id": "chatOpenAI_2", + "position": { + "x": 1566.0508325767967, + "y": 194.95875577740696 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_2", + "label": "ChatOpenAI", + "version": 5, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat 
Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-1106-vision-preview", + "name": "gpt-4-1106-vision-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + 
"optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_2-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_2-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_2-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_2-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 670, + "selected": false, + "positionAbsolute": { + "x": 1566.0508325767967, + "y": 194.95875577740696 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "promptTemplate_0", + "sourceHandle": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": 
"promptTemplate_0-promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "customFunction_1", + "sourceHandle": "customFunction_1-output-output-string|number|boolean|json|array", + "target": "promptTemplate_1", + "targetHandle": "promptTemplate_1-input-promptValues-json", + "type": "buttonedge", + "id": "customFunction_1-customFunction_1-output-output-string|number|boolean|json|array-promptTemplate_1-promptTemplate_1-input-promptValues-json", + "data": { + "label": "" + } + }, + { + "source": "promptTemplate_1", + "sourceHandle": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_1", + "targetHandle": "llmChain_1-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "promptTemplate_1-promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_1-llmChain_1-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "llmChain_0", + "sourceHandle": "llmChain_0-output-outputPrediction-string|json", + "target": "ifElseFunction_0", + "targetHandle": "ifElseFunction_0-input-functionInputVariables-json", + "type": "buttonedge", + "id": "llmChain_0-llmChain_0-output-outputPrediction-string|json-ifElseFunction_0-ifElseFunction_0-input-functionInputVariables-json" + }, + { + "source": "ifElseFunction_0", + "sourceHandle": "ifElseFunction_0-output-returnFalse-string|number|boolean|json|array", + "target": "promptTemplate_2", + "targetHandle": "promptTemplate_2-input-promptValues-json", + "type": "buttonedge", + "id": "ifElseFunction_0-ifElseFunction_0-output-returnFalse-string|number|boolean|json|array-promptTemplate_2-promptTemplate_2-input-promptValues-json" + }, + { + "source": "promptTemplate_2", + "sourceHandle": 
"promptTemplate_2-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_2", + "targetHandle": "llmChain_2-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "promptTemplate_2-promptTemplate_2-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_2-llmChain_2-input-prompt-BasePromptTemplate" + }, + { + "source": "customFunction_2", + "sourceHandle": "customFunction_2-output-output-string|number|boolean|json|array", + "target": "promptTemplate_0", + "targetHandle": "promptTemplate_0-input-promptValues-json", + "type": "buttonedge", + "id": "customFunction_2-customFunction_2-output-output-string|number|boolean|json|array-promptTemplate_0-promptTemplate_0-input-promptValues-json" + }, + { + "source": "ifElseFunction_0", + "sourceHandle": "ifElseFunction_0-output-returnTrue-string|number|boolean|json|array", + "target": "customFunction_1", + "targetHandle": "customFunction_1-input-functionInputVariables-json", + "type": "buttonedge", + "id": "ifElseFunction_0-ifElseFunction_0-output-returnTrue-string|number|boolean|json|array-customFunction_1-customFunction_1-input-functionInputVariables-json" + }, + { + "source": "chatOpenAI_1", + "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel" + }, + { + "source": "chatOpenAI_3", + "sourceHandle": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_1", + "targetHandle": "llmChain_1-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": 
"chatOpenAI_3-chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_1-llmChain_1-input-model-BaseLanguageModel" + }, + { + "source": "chatOpenAI_2", + "sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_2", + "targetHandle": "llmChain_2-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_2-llmChain_2-input-model-BaseLanguageModel" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/Screenshot 2024-02-29 011714.png b/fr/.gitbook/assets/Screenshot 2024-02-29 011714.png new file mode 100644 index 00000000..86ae8cbe Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-02-29 011714.png differ diff --git a/fr/.gitbook/assets/Screenshot 2024-02-29 012538.png b/fr/.gitbook/assets/Screenshot 2024-02-29 012538.png new file mode 100644 index 00000000..836d8aaf Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-02-29 012538.png differ diff --git a/fr/.gitbook/assets/Screenshot 2024-03-05 141551.png b/fr/.gitbook/assets/Screenshot 2024-03-05 141551.png new file mode 100644 index 00000000..00d83e5c Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-03-05 141551.png differ diff --git a/fr/.gitbook/assets/Screenshot 2024-03-05 141619.png b/fr/.gitbook/assets/Screenshot 2024-03-05 141619.png new file mode 100644 index 00000000..cd9f5f45 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-03-05 141619.png differ diff --git a/fr/.gitbook/assets/Screenshot 2024-08-26 170456.png b/fr/.gitbook/assets/Screenshot 2024-08-26 170456.png new file mode 100644 index 00000000..d92144a8 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-08-26 170456.png differ diff --git a/fr/.gitbook/assets/Screenshot 2024-11-29 155926.png b/fr/.gitbook/assets/Screenshot 2024-11-29 155926.png new file mode 
100644 index 00000000..2334c19a Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-11-29 155926.png differ diff --git a/fr/.gitbook/assets/Screenshot 2024-12-23 180712.png b/fr/.gitbook/assets/Screenshot 2024-12-23 180712.png new file mode 100644 index 00000000..2ad90f06 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2024-12-23 180712.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-12 215934.png b/fr/.gitbook/assets/Screenshot 2025-05-12 215934.png new file mode 100644 index 00000000..a4afda01 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-12 215934.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 153946.png b/fr/.gitbook/assets/Screenshot 2025-05-16 153946.png new file mode 100644 index 00000000..496bb90c Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 153946.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 154908.png b/fr/.gitbook/assets/Screenshot 2025-05-16 154908.png new file mode 100644 index 00000000..6f6df264 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 154908.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 160038.png b/fr/.gitbook/assets/Screenshot 2025-05-16 160038.png new file mode 100644 index 00000000..21dc81fa Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 160038.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 160347.png b/fr/.gitbook/assets/Screenshot 2025-05-16 160347.png new file mode 100644 index 00000000..5fc34448 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 160347.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 160427.png b/fr/.gitbook/assets/Screenshot 2025-05-16 160427.png new file mode 100644 index 00000000..232f3ab2 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 160427.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 160752.png b/fr/.gitbook/assets/Screenshot 2025-05-16 
160752.png new file mode 100644 index 00000000..6ff1c154 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 160752.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 161605.png b/fr/.gitbook/assets/Screenshot 2025-05-16 161605.png new file mode 100644 index 00000000..86cbe4aa Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 161605.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-05-16 161711.png b/fr/.gitbook/assets/Screenshot 2025-05-16 161711.png new file mode 100644 index 00000000..244c7a0b Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-05-16 161711.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-06-10 232758.png b/fr/.gitbook/assets/Screenshot 2025-06-10 232758.png new file mode 100644 index 00000000..47232e88 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-06-10 232758.png differ diff --git a/fr/.gitbook/assets/Screenshot 2025-06-18 100402.png b/fr/.gitbook/assets/Screenshot 2025-06-18 100402.png new file mode 100644 index 00000000..58198488 Binary files /dev/null and b/fr/.gitbook/assets/Screenshot 2025-06-18 100402.png differ diff --git a/fr/.gitbook/assets/Structured Output.json b/fr/.gitbook/assets/Structured Output.json new file mode 100644 index 00000000..6390ff44 --- /dev/null +++ b/fr/.gitbook/assets/Structured Output.json @@ -0,0 +1,882 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": 108, + "y": 99.5 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat 
input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startEphemeralMemory": "", + "startState": [ + { + "key": "answers", + "value": "" + } + ], + "startPersistState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "positionAbsolute": { + "x": 108, + "y": 99.5 + }, + "selected": false, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 251, + "y": 96.25 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Generate Structured 
Output", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is 
reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": 
"String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + 
], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are helpful assistant.

Today's date is {{ current_date_time }}

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "answers", + "type": "jsonArray", + "enumValues": "", + "jsonSchema": "{\n \"name\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Name of the event\"\n },\n \"date\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Date of the event\"\n },\n \"location\": {\n \"type\": \"string\",\n \"required\": true,\n \"description\": \"Location of the event\"\n }\n}", + "description": "answer to user query" + } + ], + "llmUpdateState": [ + { + "key": "answers", + "value": "

{{ output.answers }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 252, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 251, + "y": 96.25 + }, + "dragging": false + }, + { + "id": "iterationAgentflow_0", + "position": { + "x": 538.5, + "y": 7.25 + }, + "data": { + "id": "iterationAgentflow_0", + "label": "Loop through JSON array", + "version": 1, + "name": "iterationAgentflow", + "type": "Iteration", + "color": "#9C89B8", + "baseClasses": [ + "Iteration" + ], + "category": "Agent Flows", + "description": "Execute the nodes within the iteration block through N iterations", + "inputParams": [ + { + "label": "Array Input", + "name": "iterationInput", + "type": "string", + "description": "The input array to iterate over", + "acceptVariable": true, + "rows": 4, + "id": "iterationAgentflow_0-input-iterationInput-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "iterationInput": "

{{ $flow.state.answers }}

" + }, + "outputAnchors": [ + { + "id": "iterationAgentflow_0-output-iterationAgentflow", + "label": "Iteration", + "name": "iterationAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "iteration", + "width": 300, + "height": 250, + "selected": false, + "positionAbsolute": { + "x": 538.5, + "y": 7.25 + }, + "dragging": false, + "resizing": false + }, + { + "id": "httpAgentflow_0", + "position": { + "x": 91, + "y": 87 + }, + "data": { + "id": "httpAgentflow_0", + "label": "Call HTTP", + "version": 1.1, + "name": "httpAgentflow", + "type": "HTTP", + "color": "#FF7F7F", + "baseClasses": [ + "HTTP" + ], + "category": "Agent Flows", + "description": "Send a HTTP request", + "inputParams": [ + { + "label": "HTTP Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "httpBasicAuth", + "httpBearerToken", + "httpApiKey" + ], + "optional": true, + "id": "httpAgentflow_0-input-credential-credential", + "display": true + }, + { + "label": "Method", + "name": "method", + "type": "options", + "options": [ + { + "label": "GET", + "name": "GET" + }, + { + "label": "POST", + "name": "POST" + }, + { + "label": "PUT", + "name": "PUT" + }, + { + "label": "DELETE", + "name": "DELETE" + }, + { + "label": "PATCH", + "name": "PATCH" + } + ], + "default": "GET", + "id": "httpAgentflow_0-input-method-options", + "display": true + }, + { + "label": "URL", + "name": "url", + "type": "string", + "id": "httpAgentflow_0-input-url-string", + "display": true + }, + { + "label": "Headers", + "name": "headers", + "type": "array", + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "default": "" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "default": "", + "acceptVariable": true + } + ], + "optional": true, + "id": "httpAgentflow_0-input-headers-array", + "display": true + }, + { + "label": "Query Params", + "name": "queryParams", + "type": "array", + "acceptVariable": true, + 
"array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "default": "" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "default": "", + "acceptVariable": true + } + ], + "optional": true, + "id": "httpAgentflow_0-input-queryParams-array", + "display": true + }, + { + "label": "Body Type", + "name": "bodyType", + "type": "options", + "options": [ + { + "label": "JSON", + "name": "json" + }, + { + "label": "Raw", + "name": "raw" + }, + { + "label": "Form Data", + "name": "formData" + }, + { + "label": "x-www-form-urlencoded", + "name": "xWwwFormUrlencoded" + } + ], + "optional": true, + "id": "httpAgentflow_0-input-bodyType-options", + "display": true + }, + { + "label": "Body", + "name": "body", + "type": "string", + "acceptVariable": true, + "rows": 4, + "show": { + "bodyType": [ + "raw", + "json" + ] + }, + "optional": true, + "id": "httpAgentflow_0-input-body-string", + "display": true + }, + { + "label": "Body", + "name": "body", + "type": "array", + "acceptVariable": true, + "show": { + "bodyType": [ + "xWwwFormUrlencoded", + "formData" + ] + }, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "default": "" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "default": "", + "acceptVariable": true + } + ], + "optional": true, + "id": "httpAgentflow_0-input-body-array", + "display": false + }, + { + "label": "Response Type", + "name": "responseType", + "type": "options", + "options": [ + { + "label": "JSON", + "name": "json" + }, + { + "label": "Text", + "name": "text" + }, + { + "label": "Array Buffer", + "name": "arraybuffer" + }, + { + "label": "Raw (Base64)", + "name": "base64" + } + ], + "optional": true, + "id": "httpAgentflow_0-input-responseType-options", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "method": "POST", + "url": "http://localhost:5566/events", + "headers": "", + "queryParams": "", + "bodyType": "json", + "responseType": "json", + "body": "

{{ $iteration }}

" + }, + "outputAnchors": [ + { + "id": "httpAgentflow_0-output-httpAgentflow", + "label": "HTTP", + "name": "httpAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 137, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 629.5, + "y": 94.25 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "iterationAgentflow_0", + "targetHandle": "iterationAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#9C89B8", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-iterationAgentflow_0-iterationAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/Supervisor Worker Agents.json b/fr/.gitbook/assets/Supervisor Worker Agents.json new file mode 100644 index 00000000..e0886a6c --- /dev/null +++ b/fr/.gitbook/assets/Supervisor Worker Agents.json @@ -0,0 +1,2116 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -198.4357561998925, + "y": 90.62378754136287 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + 
"options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": [ + { + "key": "next", + "value": "" + }, + { + "key": "instruction", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -198.4357561998925, + "y": 90.62378754136287 + }, + "dragging": false + }, + { + "id": "conditionAgentflow_0", + "position": { + "x": 128.47781848153903, + "y": 73.36847122134466 + }, + "data": { + "id": "conditionAgentflow_0", + "label": "Check next worker", + "version": 1, + "name": "conditionAgentflow", 
+ "type": "Condition", + "color": "#FFB938", + "baseClasses": [ + "Condition" + ], + "category": "Agent Flows", + "description": "Split flows based on If Else conditions", + "inputParams": [ + { + "label": "Conditions", + "name": "conditions", + "type": "array", + "description": "Values to compare", + "acceptVariable": true, + "default": [ + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

SOFTWARE

" + } + ], + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + } + ], + "default": "string" + }, + { + "label": "Value 1", + "name": "value1", + "type": "string", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Contains", + "name": "contains" + }, + { + "label": "Ends With", + "name": "endsWith" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Contains", + "name": "notContains" + }, + { + "label": "Not Equal", + "name": "notEqual" + }, + { + "label": "Regex", + "name": "regex" + }, + { + "label": "Starts With", + "name": "startsWith" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "string", + "default": "", + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + }, + "hide": { + "conditions[$index].operation": [ + "isEmpty", + "notEmpty" + ] + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "number", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Smaller", + "name": "smaller" + }, + { + "label": "Smaller Equal", + "name": "smallerEqual" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": 
"notEqual" + }, + { + "label": "Larger", + "name": "larger" + }, + { + "label": "Larger Equal", + "name": "largerEqual" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "number", + "default": 0, + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "boolean", + "default": false, + "description": "First value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": "notEqual" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "boolean", + "default": false, + "description": "Second value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + } + ], + "id": "conditionAgentflow_0-input-conditions-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditions": [ + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

SOFTWARE

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

REVIEWER

" + } + ] + }, + "outputAnchors": [ + { + "id": "conditionAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Else" + } + ], + "outputs": { + "conditionAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 194, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 128.47781848153903, + "y": 73.36847122134466 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 352.5679347768288, + "y": -23.510778245391947 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Software Engineer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + 
"array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + 
}, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

As a Senior Software Engineer, you are a pivotal part of our innovative development team. Your expertise and leadership drive the creation of robust, scalable software solutions that meet the needs of our diverse clientele. By applying best practices in software development, you ensure that our products are reliable, efficient, and maintainable.

Your goal is to lead the development of high-quality software solutions.

Utilize your deep technical knowledge and experience to architect, design, and implement software systems that address complex problems. Collaborate closely with other engineers, reviewers to ensure that the solutions you develop align with business objectives and user needs.

Design and implement new feature for the given task, ensuring it integrates seamlessly with existing systems and meets performance requirements. Use your understanding of React, Tailwindcss, NodeJS to build this feature. Make sure to adhere to our coding standards and follow best practices.

The output should be a fully functional, well-documented feature that enhances our product's capabilities. Include detailed comments in the code.

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 191, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 352.5679347768288, + "y": -23.510778245391947 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": 359.32908043399146, + "y": 88.11650145737843 + }, + "data": { + "id": "agentAgentflow_2", + "label": "Code Reviewer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + 
"type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": 
"listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

As a Quality Assurance Engineer, you are an integral part of our development team, ensuring that our software products are of the highest quality. Your meticulous attention to detail and expertise in testing methodologies are crucial in identifying defects and ensuring that our code meets the highest standards.

Your goal is to ensure the delivery of high-quality software through thorough code review and testing.

Review the codebase for the new feature designed and implemented by the Senior Software Engineer. Your expertise goes beyond mere code inspection; you are adept at ensuring that developments not only function as intended but also adhere to the team's coding standards, enhance maintainability, and seamlessly integrate with existing systems.

With a deep appreciation for collaborative development, you provide constructive feedback, guiding contributors towards best practices and fostering a culture of continuous improvement. Your meticulous approach to reviewing code, coupled with your ability to foresee potential issues and recommend proactive solutions, ensures the delivery of high-quality software that is robust, scalable, and aligned with the team's strategic goals.

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 175, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 359.32908043399146, + "y": 88.11650145737843 + }, + "dragging": false + }, + { + "id": "agentAgentflow_3", + "position": { + "x": 357.60470406099364, + "y": 192.61532204982643 + }, + "data": { + "id": "agentAgentflow_3", + "label": "Generate Final Answer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_3-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": 
"content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_3-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_3-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + 
"loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_3-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_3-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_3-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_3-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_3-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": "", + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

Given the above conversations, generate a detailed solution developed by the software engineer and code reviewer.

Your guiding principles:

  1. Preserve Full Context
    Include all code implementations, improvements, and reviews from the conversation. Do not omit, summarize, or oversimplify key information.

  2. Markdown Output Only
    Your final output must be in Markdown format.

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gemini-2.5-flash-preview-05-20", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_3-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 283, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 357.60470406099364, + "y": 192.61532204982643 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 572.5888618465789, + "y": -20.827003962303266 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop to Supervisor", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 195, + "height": 66, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 572.5888618465789, + "y": -20.827003962303266 + } + }, + { + "id": "loopAgentflow_1", + 
"position": { + "x": 566.7568359277939, + "y": 90.98824734487103 + }, + "data": { + "id": "loopAgentflow_1", + "label": "Loop to Supervisor", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_1-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_1-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 195, + "height": 66, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 566.7568359277939, + "y": 90.98824734487103 + } + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -60.01488766486309, + "y": 87.88377139143167 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Supervisor", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + 
"name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a supervisor tasked with managing a conversation between the following workers:

- Software Engineer

- Code Reviewer

Given the following user request, respond with the worker to act next.

Each worker will perform a task and respond with their results and status.

When finished, respond with FINISH.

Select strategically to minimize the number of steps taken.

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Given the conversation above, who should act next? Or should we FINISH? Select one of: SOFTWARE, REVIEWER

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "next", + "type": "enum", + "enumValues": "FINISH, SOFTWARE, REVIEWER", + "jsonSchema": "", + "description": "next worker to act" + }, + { + "key": "instructions", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The specific instructions of the sub-task the next worker should accomplish." + }, + { + "key": "reasoning", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The reason why next worker is tasked to do the job" + } + ], + "llmUpdateState": [ + { + "key": "next", + "value": "

{{ output.next }}

" + }, + { + "key": "instruction", + "value": "

{{ output.instructions }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4.1", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 148, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": -60.01488766486309, + "y": 87.88377139143167 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "conditionAgentflow_0", + "targetHandle": "conditionAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFB938", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-conditionAgentflow_0-conditionAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-0", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-0-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": 
"conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-1", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-1-agentAgentflow_2-agentAgentflow_2" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-2", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-2-agentAgentflow_3-agentAgentflow_3" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "agentAgentflow_2", + "sourceHandle": "agentAgentflow_2-output-agentAgentflow", + "target": "loopAgentflow_1", + "targetHandle": "loopAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_2-agentAgentflow_2-output-agentAgentflow-loopAgentflow_1-loopAgentflow_1" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/ToolAgent Chatflow.json b/fr/.gitbook/assets/ToolAgent Chatflow.json new file mode 100644 index 00000000..1d71ce92 --- /dev/null +++ b/fr/.gitbook/assets/ToolAgent Chatflow.json @@ -0,0 +1,1160 @@ +{ + "nodes": [ + { + "width": 300, + "height": 606, + "id": "pinecone_0", + "position": { + "x": 416.0885364955418, + "y": -74.64623359488957 + }, + "type": "customNode", + 
"data": { + "id": "pinecone_0", + "label": "Pinecone", + "version": 3, + "name": "pinecone", + "type": "Pinecone", + "baseClasses": [ + "Pinecone", + "VectorStoreRetriever", + "BaseRetriever" + ], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "pineconeApi" + ], + "id": "pinecone_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pinecone_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pinecone_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. 
Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pinecone_0-input-document-Document" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "Embeddings", + "id": "pinecone_0-input-embeddings-Embeddings" + }, + { + "label": "Record Manager", + "name": "recordManager", + "type": "RecordManager", + "description": "Keep track of the record to prevent duplication", + "optional": true, + "id": "pinecone_0-input-recordManager-RecordManager" + } + ], + "inputs": { + "document": "", + "embeddings": "{{openAIEmbeddings_0.data.instance}}", + "recordManager": "", + "pineconeIndex": "newindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"apple\"}", + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "description": "", + "type": "Pinecone | VectorStoreRetriever | BaseRetriever" + }, + { + "id": "pinecone_0-output-vectorStore-Pinecone|VectorStore", + "name": "vectorStore", + "label": "Pinecone Vector Store", + "description": "", + "type": "Pinecone | VectorStore" + } + ], 
+ "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 416.0885364955418, + "y": -74.64623359488957 + }, + "dragging": false + }, + { + "width": 300, + "height": 424, + "id": "openAIEmbeddings_0", + "position": { + "x": 54.119166092646566, + "y": -20.12821243199312 + }, + "type": "customNode", + "data": { + "id": "openAIEmbeddings_0", + "label": "OpenAI Embeddings", + "version": 4, + "name": "openAIEmbeddings", + "type": "OpenAIEmbeddings", + "baseClasses": [ + "OpenAIEmbeddings", + "Embeddings" + ], + "category": "Embeddings", + "description": "OpenAI API to generate embeddings for a given text", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "openAIEmbeddings_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "text-embedding-ada-002", + "id": "openAIEmbeddings_0-input-modelName-asyncOptions" + }, + { + "label": "Strip New Lines", + "name": "stripNewLines", + "type": "boolean", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-stripNewLines-boolean" + }, + { + "label": "Batch Size", + "name": "batchSize", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-batchSize-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-basepath-string" + }, + { + "label": "Dimensions", + "name": "dimensions", + "type": "number", + "optional": true, + "additionalParams": true, + "id": 
"openAIEmbeddings_0-input-dimensions-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "text-embedding-ada-002", + "stripNewLines": "", + "batchSize": "", + "timeout": "", + "basepath": "", + "dimensions": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "name": "openAIEmbeddings", + "label": "OpenAIEmbeddings", + "description": "OpenAI API to generate embeddings for a given text", + "type": "OpenAIEmbeddings | Embeddings" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 54.119166092646566, + "y": -20.12821243199312 + }, + "dragging": false + }, + { + "width": 300, + "height": 606, + "id": "pinecone_1", + "position": { + "x": 428.41115568995156, + "y": 549.0169795435812 + }, + "type": "customNode", + "data": { + "id": "pinecone_1", + "label": "Pinecone", + "version": 3, + "name": "pinecone", + "type": "Pinecone", + "baseClasses": [ + "Pinecone", + "VectorStoreRetriever", + "BaseRetriever" + ], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "pineconeApi" + ], + "id": "pinecone_1-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pinecone_1-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pinecone_1-input-pineconeMetadataFilter-json" + }, + { + 
"label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-lambda-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pinecone_1-input-document-Document" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "Embeddings", + "id": "pinecone_1-input-embeddings-Embeddings" + }, + { + "label": "Record Manager", + "name": "recordManager", + "type": "RecordManager", + "description": "Keep track of the record to prevent duplication", + "optional": true, + "id": "pinecone_1-input-recordManager-RecordManager" + } + ], + "inputs": { + "document": "", + "embeddings": "{{openAIEmbeddings_1.data.instance}}", + "recordManager": "", + "pineconeIndex": "newindex", + "pineconeNamespace": "pinecone-form10k-2", + "pineconeMetadataFilter": "{\"source\":\"tesla\"}", + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "description": "", + "type": "Pinecone | VectorStoreRetriever | BaseRetriever" + }, + { + "id": "pinecone_1-output-vectorStore-Pinecone|VectorStore", + "name": "vectorStore", + "label": "Pinecone Vector Store", + "description": "", + "type": "Pinecone | VectorStore" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 428.41115568995156, + "y": 549.0169795435812 + }, + "dragging": false + }, + { + "width": 300, + "height": 424, + "id": "openAIEmbeddings_1", + "position": { + "x": 58.45057557109914, + "y": 575.7733202609951 + }, + "type": "customNode", + "data": { 
+ "id": "openAIEmbeddings_1", + "label": "OpenAI Embeddings", + "version": 4, + "name": "openAIEmbeddings", + "type": "OpenAIEmbeddings", + "baseClasses": [ + "OpenAIEmbeddings", + "Embeddings" + ], + "category": "Embeddings", + "description": "OpenAI API to generate embeddings for a given text", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "openAIEmbeddings_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "text-embedding-ada-002", + "id": "openAIEmbeddings_1-input-modelName-asyncOptions" + }, + { + "label": "Strip New Lines", + "name": "stripNewLines", + "type": "boolean", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-stripNewLines-boolean" + }, + { + "label": "Batch Size", + "name": "batchSize", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-batchSize-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-basepath-string" + }, + { + "label": "Dimensions", + "name": "dimensions", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-dimensions-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "text-embedding-ada-002", + "stripNewLines": "", + "batchSize": "", + "timeout": "", + "basepath": "", + "dimensions": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "name": "openAIEmbeddings", + "label": "OpenAIEmbeddings", + "description": "OpenAI API to generate 
embeddings for a given text", + "type": "OpenAIEmbeddings | Embeddings" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 58.45057557109914, + "y": 575.7733202609951 + }, + "dragging": false + }, + { + "width": 300, + "height": 253, + "id": "bufferMemory_0", + "position": { + "x": 825.5960565466753, + "y": 1212.2401709995304 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 2, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": [ + "BufferMemory", + "BaseChatMemory", + "BaseMemory" + ], + "category": "Memory", + "description": "Retrieve chat messages stored in database", + "inputParams": [ + { + "label": "Session Id", + "name": "sessionId", + "type": "string", + "description": "If not specified, a random id will be used. Learn more", + "default": "", + "additionalParams": true, + "optional": true, + "id": "bufferMemory_0-input-sessionId-string" + }, + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "additionalParams": true, + "id": "bufferMemory_0-input-memoryKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "sessionId": "", + "memoryKey": "chat_history" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "description": "Retrieve chat messages stored in database", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 825.5960565466753, + "y": 1212.2401709995304 + }, + "dragging": false + }, + { + "width": 300, + "height": 603, + "id": "retrieverTool_2", + "position": { + "x": 798.3128281367018, + "y": -151.77659673435184 + }, + "type": "customNode", + "data": { + "id": "retrieverTool_2", + "label": "Retriever Tool", + "version": 2, + "name": "retrieverTool", + 
"type": "RetrieverTool", + "baseClasses": [ + "RetrieverTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": "Tools", + "description": "Use a retriever as allowed tool for agent", + "inputParams": [ + { + "label": "Retriever Name", + "name": "name", + "type": "string", + "placeholder": "search_state_of_union", + "id": "retrieverTool_2-input-name-string" + }, + { + "label": "Retriever Description", + "name": "description", + "type": "string", + "description": "When should agent uses to retrieve documents", + "rows": 3, + "placeholder": "Searches and returns documents regarding the state-of-the-union.", + "id": "retrieverTool_2-input-description-string" + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "retrieverTool_2-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Retriever", + "name": "retriever", + "type": "BaseRetriever", + "id": "retrieverTool_2-input-retriever-BaseRetriever" + } + ], + "inputs": { + "name": "search_apple", + "description": "Use this function to answer user questions about Apple Inc (APPL). 
It contains a SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period.", + "retriever": "{{pinecone_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "retrieverTool_2-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "retrieverTool", + "label": "RetrieverTool", + "type": "RetrieverTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 798.3128281367018, + "y": -151.77659673435184 + }, + "dragging": false + }, + { + "width": 300, + "height": 603, + "id": "retrieverTool_1", + "position": { + "x": 805.1192462354428, + "y": 479.4961512574057 + }, + "type": "customNode", + "data": { + "id": "retrieverTool_1", + "label": "Retriever Tool", + "version": 2, + "name": "retrieverTool", + "type": "RetrieverTool", + "baseClasses": [ + "RetrieverTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": "Tools", + "description": "Use a retriever as allowed tool for agent", + "inputParams": [ + { + "label": "Retriever Name", + "name": "name", + "type": "string", + "placeholder": "search_state_of_union", + "id": "retrieverTool_1-input-name-string" + }, + { + "label": "Retriever Description", + "name": "description", + "type": "string", + "description": "When should agent uses to retrieve documents", + "rows": 3, + "placeholder": "Searches and returns documents regarding the state-of-the-union.", + "id": "retrieverTool_1-input-description-string" + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "retrieverTool_1-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Retriever", + "name": "retriever", + "type": "BaseRetriever", + "id": "retrieverTool_1-input-retriever-BaseRetriever" + } + ], + "inputs": { + "name": "search_tsla", + 
"description": "Use this function to answer user questions about Tesla Inc (TSLA). It contains a SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period.", + "retriever": "{{pinecone_1.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "retrieverTool", + "label": "RetrieverTool", + "type": "RetrieverTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 805.1192462354428, + "y": 479.4961512574057 + }, + "dragging": false + }, + { + "id": "chatOpenAI_0", + "position": { + "x": 35.45489050081932, + "y": 1052.597243506172 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 6, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "gpt-3.5-turbo", + "id": "chatOpenAI_0-input-modelName-asyncOptions" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": 
"number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 670, + "selected": false, + "positionAbsolute": { + "x": 35.45489050081932, + "y": 1052.597243506172 + }, + "dragging": false + }, + { + "id": "toolAgent_0", + "position": { + "x": 1275.5041953989141, + "y": 391.50042413140017 + }, + "type": "customNode", + "data": { + "id": "toolAgent_0", + "label": "Tool Agent", + "version": 1, + "name": "toolAgent", + "type": "AgentExecutor", + "baseClasses": [ + "AgentExecutor", + "BaseChain", + "Runnable" + ], + "category": "Agents", + "description": "Agent 
that uses Function Calling to pick the tools and args to call", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessage", + "type": "string", + "default": "You are a helpful AI assistant.", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "toolAgent_0-input-systemMessage-string" + }, + { + "label": "Max Iterations", + "name": "maxIterations", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "toolAgent_0-input-maxIterations-number" + } + ], + "inputAnchors": [ + { + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "toolAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "toolAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Tool Calling Chat Model", + "name": "model", + "type": "BaseChatModel", + "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", + "id": "toolAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "toolAgent_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "tools": [ + "{{retrieverTool_2.data.instance}}", + "{{retrieverTool_1.data.instance}}" + ], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatOpenAI_0.data.instance}}", + "systemMessage": "You are a helpful AI assistant.", + "inputModeration": "", + "maxIterations": "" + }, + "outputAnchors": [ + { + "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable", + "name": "toolAgent", + "label": "AgentExecutor", + "description": "Agent that uses Function Calling to pick the tools and args to call", + "type": "AgentExecutor | BaseChain | Runnable" + 
} + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 435, + "selected": false, + "positionAbsolute": { + "x": 1275.5041953989141, + "y": 391.50042413140017 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "openAIEmbeddings_0", + "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "target": "pinecone_0", + "targetHandle": "pinecone_0-input-embeddings-Embeddings", + "type": "buttonedge", + "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings" + }, + { + "source": "openAIEmbeddings_1", + "sourceHandle": "openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "target": "pinecone_1", + "targetHandle": "pinecone_1-input-embeddings-Embeddings", + "type": "buttonedge", + "id": "openAIEmbeddings_1-openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_1-pinecone_1-input-embeddings-Embeddings" + }, + { + "source": "pinecone_0", + "sourceHandle": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "target": "retrieverTool_2", + "targetHandle": "retrieverTool_2-input-retriever-BaseRetriever", + "type": "buttonedge", + "id": "pinecone_0-pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-retrieverTool_2-retrieverTool_2-input-retriever-BaseRetriever" + }, + { + "source": "pinecone_1", + "sourceHandle": "pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "target": "retrieverTool_1", + "targetHandle": "retrieverTool_1-input-retriever-BaseRetriever", + "type": "buttonedge", + "id": "pinecone_1-pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-retrieverTool_1-retrieverTool_1-input-retriever-BaseRetriever" + }, + { + "source": "retrieverTool_2", + "sourceHandle": "retrieverTool_2-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": 
"toolAgent_0", + "targetHandle": "toolAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "retrieverTool_2-retrieverTool_2-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable-toolAgent_0-toolAgent_0-input-tools-Tool" + }, + { + "source": "retrieverTool_1", + "sourceHandle": "retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": "toolAgent_0", + "targetHandle": "toolAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "retrieverTool_1-retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable-toolAgent_0-toolAgent_0-input-tools-Tool" + }, + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "toolAgent_0", + "targetHandle": "toolAgent_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-toolAgent_0-toolAgent_0-input-memory-BaseChatMemory" + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "toolAgent_0", + "targetHandle": "toolAgent_0-input-model-BaseChatModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-toolAgent_0-toolAgent_0-input-model-BaseChatModel" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/UD_02.png b/fr/.gitbook/assets/UD_02.png new file mode 100644 index 00000000..a606f777 Binary files /dev/null and b/fr/.gitbook/assets/UD_02.png differ diff --git a/fr/.gitbook/assets/UD_03.png b/fr/.gitbook/assets/UD_03.png new file mode 100644 index 00000000..9751382e Binary files /dev/null and b/fr/.gitbook/assets/UD_03.png differ diff --git a/fr/.gitbook/assets/UD_04.png b/fr/.gitbook/assets/UD_04.png new file mode 100644 index 00000000..47bb02dd Binary 
files /dev/null and b/fr/.gitbook/assets/UD_04.png differ diff --git a/fr/.gitbook/assets/UD_05.png b/fr/.gitbook/assets/UD_05.png new file mode 100644 index 00000000..0bf972b8 Binary files /dev/null and b/fr/.gitbook/assets/UD_05.png differ diff --git a/fr/.gitbook/assets/UD_06.png b/fr/.gitbook/assets/UD_06.png new file mode 100644 index 00000000..7d74d610 Binary files /dev/null and b/fr/.gitbook/assets/UD_06.png differ diff --git a/fr/.gitbook/assets/Untitled (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/Untitled (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c62be673 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/Untitled (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..69019848 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (1) (1) (1) (1).png b/fr/.gitbook/assets/Untitled (1) (1) (1) (1).png new file mode 100644 index 00000000..850962ca Binary files /dev/null and b/fr/.gitbook/assets/Untitled (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (1) (1) (1).png b/fr/.gitbook/assets/Untitled (1) (1) (1).png new file mode 100644 index 00000000..acf1b318 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (1).png b/fr/.gitbook/assets/Untitled (1).png new file mode 100644 index 00000000..45c95757 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (1).png differ diff --git a/fr/.gitbook/assets/Untitled (10).png b/fr/.gitbook/assets/Untitled (10).png new file mode 100644 index 00000000..d7fe3181 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (10).png differ diff --git a/fr/.gitbook/assets/Untitled (2) (1) (1).png b/fr/.gitbook/assets/Untitled (2) (1) (1).png new file mode 100644 index 00000000..33615d39 Binary files /dev/null 
and b/fr/.gitbook/assets/Untitled (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (2) (1).png b/fr/.gitbook/assets/Untitled (2) (1).png new file mode 100644 index 00000000..0648527a Binary files /dev/null and b/fr/.gitbook/assets/Untitled (2) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (2).png b/fr/.gitbook/assets/Untitled (2).png new file mode 100644 index 00000000..9cc9bb9d Binary files /dev/null and b/fr/.gitbook/assets/Untitled (2).png differ diff --git a/fr/.gitbook/assets/Untitled (3) (1).png b/fr/.gitbook/assets/Untitled (3) (1).png new file mode 100644 index 00000000..36ea6194 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (3) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (3).png b/fr/.gitbook/assets/Untitled (3).png new file mode 100644 index 00000000..389ddf3d Binary files /dev/null and b/fr/.gitbook/assets/Untitled (3).png differ diff --git a/fr/.gitbook/assets/Untitled (4) (1).png b/fr/.gitbook/assets/Untitled (4) (1).png new file mode 100644 index 00000000..8b45bad3 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (4) (1).png differ diff --git a/fr/.gitbook/assets/Untitled (4).png b/fr/.gitbook/assets/Untitled (4).png new file mode 100644 index 00000000..dfa4a00b Binary files /dev/null and b/fr/.gitbook/assets/Untitled (4).png differ diff --git a/fr/.gitbook/assets/Untitled (5).png b/fr/.gitbook/assets/Untitled (5).png new file mode 100644 index 00000000..062bbf38 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (5).png differ diff --git a/fr/.gitbook/assets/Untitled (7).png b/fr/.gitbook/assets/Untitled (7).png new file mode 100644 index 00000000..10807828 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (7).png differ diff --git a/fr/.gitbook/assets/Untitled (8).png b/fr/.gitbook/assets/Untitled (8).png new file mode 100644 index 00000000..f0386854 Binary files /dev/null and b/fr/.gitbook/assets/Untitled (8).png differ diff --git a/fr/.gitbook/assets/Untitled (9).png 
b/fr/.gitbook/assets/Untitled (9).png new file mode 100644 index 00000000..03f2588c Binary files /dev/null and b/fr/.gitbook/assets/Untitled (9).png differ diff --git a/fr/.gitbook/assets/Untitled-2024-07-21-0317 (1).png b/fr/.gitbook/assets/Untitled-2024-07-21-0317 (1).png new file mode 100644 index 00000000..efdf8e52 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2024-07-21-0317 (1).png differ diff --git a/fr/.gitbook/assets/Untitled-2024-07-21-0317.png b/fr/.gitbook/assets/Untitled-2024-07-21-0317.png new file mode 100644 index 00000000..29c4005c Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2024-07-21-0317.png differ diff --git a/fr/.gitbook/assets/Untitled-2024-10-19-0050.png b/fr/.gitbook/assets/Untitled-2024-10-19-0050.png new file mode 100644 index 00000000..de2aaac2 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2024-10-19-0050.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-01-23-1520.png b/fr/.gitbook/assets/Untitled-2025-01-23-1520.png new file mode 100644 index 00000000..20daf8dc Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-01-23-1520.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-02-02-1727.png b/fr/.gitbook/assets/Untitled-2025-02-02-1727.png new file mode 100644 index 00000000..5fddc42b Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-02-02-1727.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-03-02-1727.png b/fr/.gitbook/assets/Untitled-2025-03-02-1727.png new file mode 100644 index 00000000..44ae78a9 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-03-02-1727.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-04-02-1727.png b/fr/.gitbook/assets/Untitled-2025-04-02-1727.png new file mode 100644 index 00000000..1360ea32 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-04-02-1727.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-056-02-1727.png b/fr/.gitbook/assets/Untitled-2025-056-02-1727.png new file mode 100644 index 
00000000..52ba75a1 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-056-02-1727.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-06-15-0132 (1).png b/fr/.gitbook/assets/Untitled-2025-06-15-0132 (1).png new file mode 100644 index 00000000..aea36a29 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-06-15-0132 (1).png differ diff --git a/fr/.gitbook/assets/Untitled-2025-06-15-0132.png b/fr/.gitbook/assets/Untitled-2025-06-15-0132.png new file mode 100644 index 00000000..99b9a39d Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-06-15-0132.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-06-16-1507.png b/fr/.gitbook/assets/Untitled-2025-06-16-1507.png new file mode 100644 index 00000000..1b214822 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-06-16-1507.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-06-19-1011.png b/fr/.gitbook/assets/Untitled-2025-06-19-1011.png new file mode 100644 index 00000000..05e47d19 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-06-19-1011.png differ diff --git a/fr/.gitbook/assets/Untitled-2025-10-02-1133.png b/fr/.gitbook/assets/Untitled-2025-10-02-1133.png new file mode 100644 index 00000000..3f93f604 Binary files /dev/null and b/fr/.gitbook/assets/Untitled-2025-10-02-1133.png differ diff --git a/fr/.gitbook/assets/Untitled.png b/fr/.gitbook/assets/Untitled.png new file mode 100644 index 00000000..11882563 Binary files /dev/null and b/fr/.gitbook/assets/Untitled.png differ diff --git a/fr/.gitbook/assets/XMLAgent Chatflow.json b/fr/.gitbook/assets/XMLAgent Chatflow.json new file mode 100644 index 00000000..8c478123 --- /dev/null +++ b/fr/.gitbook/assets/XMLAgent Chatflow.json @@ -0,0 +1,1109 @@ +{ + "nodes": [ + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 363.18868341411155, + "y": 473.71555789686244 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, 
+ "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": [ + "BufferMemory", + "BaseChatMemory", + "BaseMemory" + ], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 363.18868341411155, + "y": 473.71555789686244 + }, + "dragging": false + }, + { + "id": "xmlAgent_0", + "position": { + "x": 1492.5028255133452, + "y": 161.2653235031646 + }, + "type": "customNode", + "data": { + "id": "xmlAgent_0", + "label": "XML Agent", + "version": 2, + "name": "xmlAgent", + "type": "XMLAgent", + "baseClasses": [ + "XMLAgent", + "BaseChain", + "Runnable" + ], + "category": "Agents", + "description": "Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessage", + "type": "string", + "warning": "Prompt must include input variables: {tools}, {chat_history}, {input} and {agent_scratchpad}", + "rows": 4, + "default": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use and tags. 
You will then get back a response in the form \nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\nsearchweather in SF\n64 degrees\n\nWhen you are done, respond with a final answer between . For example:\n\nThe weather in SF is 64 degrees\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}", + "additionalParams": true, + "id": "xmlAgent_0-input-systemMessage-string" + } + ], + "inputAnchors": [ + { + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "xmlAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "xmlAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel", + "id": "xmlAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "xmlAgent_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "tools": [ + "{{retrieverTool_0.data.instance}}", + "{{retrieverTool_1.data.instance}}" + ], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatAnthropic_0.data.instance}}", + "systemMessage": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use and tags. You will then get back a response in the form \nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\nsearchweather in SF\n64 degrees\n\nWhen you are done, respond with a final answer between . 
For example:\n\nThe weather in SF is 64 degrees\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}", + "inputModeration": "" + }, + "outputAnchors": [ + { + "id": "xmlAgent_0-output-xmlAgent-XMLAgent|BaseChain|Runnable", + "name": "xmlAgent", + "label": "XMLAgent", + "description": "Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)", + "type": "XMLAgent | BaseChain | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 434, + "selected": false, + "positionAbsolute": { + "x": 1492.5028255133452, + "y": 161.2653235031646 + }, + "dragging": false + }, + { + "id": "chatAnthropic_0", + "position": { + "x": 1073.3453545616378, + "y": 298.65772549403795 + }, + "type": "customNode", + "data": { + "id": "chatAnthropic_0", + "label": "ChatAnthropic", + "version": 5, + "name": "chatAnthropic", + "type": "ChatAnthropic", + "baseClasses": [ + "ChatAnthropic", + "ChatAnthropicMessages", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "anthropicApi" + ], + "id": "chatAnthropic_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "claude-3-haiku", + "name": "claude-3-haiku-20240307", + "description": "Fastest and most compact model, designed for near-instant responsiveness" + }, + { + "label": "claude-3-opus", + "name": "claude-3-opus-20240229", + "description": "Most powerful model for highly complex tasks" + }, + { + "label": "claude-3-sonnet", + "name": "claude-3-sonnet-20240229", + "description": "Ideal balance of intelligence and speed for enterprise workloads" + }, + { + "label": "claude-2.0 (legacy)", + 
"name": "claude-2.0", + "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-2.1 (legacy)", + "name": "claude-2.1", + "description": "Claude 2 latest full version" + }, + { + "label": "claude-instant-1.2 (legacy)", + "name": "claude-instant-1.2", + "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + } + ], + "default": "claude-3-haiku", + "optional": true, + "id": "chatAnthropic_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatAnthropic_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-maxTokensToSample-number" + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-topP-number" + }, + { + "label": "Top K", + "name": "topK", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-topK-number" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses claude-3-* models when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatAnthropic_0-input-allowImageUploads-boolean" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatAnthropic_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "claude-3-sonnet-20240229", + "temperature": "0", + "maxTokensToSample": "", + "topP": "", + "topK": "", + "allowImageUploads": "" + }, + "outputAnchors": [ + { + "id": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatAnthropic", + "label": "ChatAnthropic", + "description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint", + "type": "ChatAnthropic | ChatAnthropicMessages | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 671, + "selected": false, + "positionAbsolute": { + "x": 1073.3453545616378, + "y": 298.65772549403795 + }, + "dragging": false + }, + { + "id": "retrieverTool_0", + "position": { + "x": 716.1176116429738, + "y": -19.654771005396583 + }, + "type": "customNode", + "data": { + "id": "retrieverTool_0", + "label": "Retriever Tool", + "version": 2, + "name": "retrieverTool", + "type": "RetrieverTool", + "baseClasses": [ + "RetrieverTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": "Tools", + "description": "Use a retriever as allowed tool for agent", + "inputParams": [ + { + "label": "Retriever Name", + "name": "name", + "type": "string", + "placeholder": "search_state_of_union", + "id": "retrieverTool_0-input-name-string" + }, + { + "label": "Retriever Description", + "name": "description", + "type": "string", + "description": "When should agent uses to retrieve documents", + "rows": 3, + "placeholder": "Searches and returns documents 
regarding the state-of-the-union.", + "id": "retrieverTool_0-input-description-string" + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "retrieverTool_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Retriever", + "name": "retriever", + "type": "BaseRetriever", + "id": "retrieverTool_0-input-retriever-BaseRetriever" + } + ], + "inputs": { + "name": "search_apple", + "description": "Use this function to answer user questions about Apple Inc (APPL). It contains a SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period.", + "retriever": "{{pinecone_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "retrieverTool_0-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "retrieverTool", + "label": "RetrieverTool", + "description": "Use a retriever as allowed tool for agent", + "type": "RetrieverTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 602, + "selected": false, + "positionAbsolute": { + "x": 716.1176116429738, + "y": -19.654771005396583 + }, + "dragging": false + }, + { + "id": "pinecone_0", + "position": { + "x": 378.7110351151108, + "y": -122.59235389975663 + }, + "type": "customNode", + "data": { + "id": "pinecone_0", + "label": "Pinecone", + "version": 2, + "name": "pinecone", + "type": "Pinecone", + "baseClasses": [ + "Pinecone", + "VectorStoreRetriever", + "BaseRetriever" + ], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "pineconeApi" + ], + "id": "pinecone_0-input-credential-credential" + }, + { + 
"label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pinecone_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pinecone_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pinecone_0-input-document-Document" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "Embeddings", + "id": "pinecone_0-input-embeddings-Embeddings" + } + ], + "inputs": { + "document": "", + "embeddings": "{{openAIEmbeddings_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"apple\"}", + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "description": "", + "type": "Pinecone | VectorStoreRetriever | BaseRetriever" + }, + { + "id": "pinecone_0-output-vectorStore-Pinecone|VectorStore", + "name": "vectorStore", + "label": "Pinecone Vector Store", + "description": "", + "type": "Pinecone | VectorStore" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "width": 300, + "height": 555, + "selected": false, + "positionAbsolute": { + "x": 378.7110351151108, + "y": -122.59235389975663 + }, + "dragging": false + }, + { + "id": "openAIEmbeddings_0", + "position": { + "x": -12.140161701463555, + "y": -54.69898887572853 + }, + "type": "customNode", + "data": { + "id": "openAIEmbeddings_0", + "label": "OpenAI Embeddings", + "version": 2, + "name": "openAIEmbeddings", + "type": "OpenAIEmbeddings", + "baseClasses": [ + "OpenAIEmbeddings", + "Embeddings" + ], + "category": "Embeddings", + "description": "OpenAI 
API to generate embeddings for a given text", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "openAIEmbeddings_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbeddings_0-input-modelName-options" + }, + { + "label": "Strip New Lines", + "name": "stripNewLines", + "type": "boolean", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-stripNewLines-boolean" + }, + { + "label": "Batch Size", + "name": "batchSize", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-batchSize-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "text-embedding-ada-002", + "stripNewLines": "", + "batchSize": "", + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "name": "openAIEmbeddings", + "label": "OpenAIEmbeddings", + "description": "OpenAI API to generate embeddings for a given text", + "type": "OpenAIEmbeddings | Embeddings" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 425, + "selected": false, + "positionAbsolute": { + "x": 
-12.140161701463555, + "y": -54.69898887572853 + }, + "dragging": false + }, + { + "id": "retrieverTool_1", + "position": { + "x": 1095.1510607975026, + "y": -326.06593550420894 + }, + "type": "customNode", + "data": { + "id": "retrieverTool_1", + "label": "Retriever Tool", + "version": 2, + "name": "retrieverTool", + "type": "RetrieverTool", + "baseClasses": [ + "RetrieverTool", + "DynamicTool", + "Tool", + "StructuredTool", + "Runnable" + ], + "category": "Tools", + "description": "Use a retriever as allowed tool for agent", + "inputParams": [ + { + "label": "Retriever Name", + "name": "name", + "type": "string", + "placeholder": "search_state_of_union", + "id": "retrieverTool_1-input-name-string" + }, + { + "label": "Retriever Description", + "name": "description", + "type": "string", + "description": "When should agent uses to retrieve documents", + "rows": 3, + "placeholder": "Searches and returns documents regarding the state-of-the-union.", + "id": "retrieverTool_1-input-description-string" + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "retrieverTool_1-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Retriever", + "name": "retriever", + "type": "BaseRetriever", + "id": "retrieverTool_1-input-retriever-BaseRetriever" + } + ], + "inputs": { + "name": "search_tsla", + "description": "Use this function to answer user questions about Tesla Inc (TSLA). 
It contains a SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period.", + "retriever": "{{pinecone_1.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "name": "retrieverTool", + "label": "RetrieverTool", + "description": "Use a retriever as allowed tool for agent", + "type": "RetrieverTool | DynamicTool | Tool | StructuredTool | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 602, + "selected": false, + "positionAbsolute": { + "x": 1095.1510607975026, + "y": -326.06593550420894 + }, + "dragging": false + }, + { + "id": "pinecone_1", + "position": { + "x": 762.2373165936692, + "y": -619.4996089374204 + }, + "type": "customNode", + "data": { + "id": "pinecone_1", + "label": "Pinecone", + "version": 2, + "name": "pinecone", + "type": "Pinecone", + "baseClasses": [ + "Pinecone", + "VectorStoreRetriever", + "BaseRetriever" + ], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "pineconeApi" + ], + "id": "pinecone_1-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pinecone_1-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pinecone_1-input-pineconeMetadataFilter-json" + }, + { + 
"label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_1-input-lambda-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pinecone_1-input-document-Document" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "Embeddings", + "id": "pinecone_1-input-embeddings-Embeddings" + } + ], + "inputs": { + "document": "", + "embeddings": "{{openAIEmbeddings_1.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"tesla\"}", + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "description": "", + "options": [ + { + "id": "pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "description": "", + "type": "Pinecone | VectorStoreRetriever | BaseRetriever" + }, + { + "id": "pinecone_1-output-vectorStore-Pinecone|VectorStore", + "name": "vectorStore", + "label": "Pinecone Vector Store", + "description": "", + "type": "Pinecone | VectorStore" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "retriever" + }, + "selected": false + }, + "width": 300, + "height": 555, + "selected": false, + "positionAbsolute": { + "x": 762.2373165936692, + "y": -619.4996089374204 + }, + "dragging": false + }, + { + "id": "openAIEmbeddings_1", + "position": { + "x": 416.3144430173899, + "y": -599.2302665481047 + }, + "type": "customNode", + "data": { + "id": "openAIEmbeddings_1", + "label": "OpenAI Embeddings", + "version": 2, + "name": "openAIEmbeddings", + "type": "OpenAIEmbeddings", + "baseClasses": [ + "OpenAIEmbeddings", + "Embeddings" + ], + "category": "Embeddings", + "description": "OpenAI API 
to generate embeddings for a given text", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": [ + "openAIApi" + ], + "id": "openAIEmbeddings_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbeddings_1-input-modelName-options" + }, + { + "label": "Strip New Lines", + "name": "stripNewLines", + "type": "boolean", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-stripNewLines-boolean" + }, + { + "label": "Batch Size", + "name": "batchSize", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-batchSize-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbeddings_1-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "text-embedding-ada-002", + "stripNewLines": "", + "batchSize": "", + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "name": "openAIEmbeddings", + "label": "OpenAIEmbeddings", + "description": "OpenAI API to generate embeddings for a given text", + "type": "OpenAIEmbeddings | Embeddings" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 425, + "selected": false, + "positionAbsolute": { + "x": 
416.3144430173899, + "y": -599.2302665481047 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "retrieverTool_0", + "sourceHandle": "retrieverTool_0-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": "xmlAgent_0", + "targetHandle": "xmlAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "retrieverTool_0-retrieverTool_0-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable-xmlAgent_0-xmlAgent_0-input-tools-Tool" + }, + { + "source": "chatAnthropic_0", + "sourceHandle": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable", + "target": "xmlAgent_0", + "targetHandle": "xmlAgent_0-input-model-BaseChatModel", + "type": "buttonedge", + "id": "chatAnthropic_0-chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable-xmlAgent_0-xmlAgent_0-input-model-BaseChatModel" + }, + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "xmlAgent_0", + "targetHandle": "xmlAgent_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-xmlAgent_0-xmlAgent_0-input-memory-BaseChatMemory" + }, + { + "source": "openAIEmbeddings_0", + "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "target": "pinecone_0", + "targetHandle": "pinecone_0-input-embeddings-Embeddings", + "type": "buttonedge", + "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings" + }, + { + "source": "pinecone_0", + "sourceHandle": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "target": "retrieverTool_0", + "targetHandle": "retrieverTool_0-input-retriever-BaseRetriever", + "type": 
"buttonedge", + "id": "pinecone_0-pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-retrieverTool_0-retrieverTool_0-input-retriever-BaseRetriever" + }, + { + "source": "retrieverTool_1", + "sourceHandle": "retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable", + "target": "xmlAgent_0", + "targetHandle": "xmlAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "retrieverTool_1-retrieverTool_1-output-retrieverTool-RetrieverTool|DynamicTool|Tool|StructuredTool|Runnable-xmlAgent_0-xmlAgent_0-input-tools-Tool" + }, + { + "source": "openAIEmbeddings_1", + "sourceHandle": "openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", + "target": "pinecone_1", + "targetHandle": "pinecone_1-input-embeddings-Embeddings", + "type": "buttonedge", + "id": "openAIEmbeddings_1-openAIEmbeddings_1-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_1-pinecone_1-input-embeddings-Embeddings" + }, + { + "source": "pinecone_1", + "sourceHandle": "pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", + "target": "retrieverTool_1", + "targetHandle": "retrieverTool_1-input-retriever-BaseRetriever", + "type": "buttonedge", + "id": "pinecone_1-pinecone_1-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-retrieverTool_1-retrieverTool_1-input-retriever-BaseRetriever" + } + ] +} \ No newline at end of file diff --git a/fr/.gitbook/assets/agentflow.png b/fr/.gitbook/assets/agentflow.png new file mode 100644 index 00000000..56f87812 Binary files /dev/null and b/fr/.gitbook/assets/agentflow.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/state.png b/fr/.gitbook/assets/agentflowv2/darkmode/state.png new file mode 100644 index 00000000..a0b3327f Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/state.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-d (1).png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-d (1).png new file 
mode 100644 index 00000000..0a79dfbd Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-d (1).png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-d.png new file mode 100644 index 00000000..b500bbe7 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-l.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-l.png new file mode 100644 index 00000000..73a451ba Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-01-l.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-02-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-02-d.png new file mode 100644 index 00000000..d390e90c Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-02-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-03-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-03-d.png new file mode 100644 index 00000000..12b44523 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-03-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-04-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-04-d.png new file mode 100644 index 00000000..ecaf2b98 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-04-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-05-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-05-d.png new file mode 100644 index 00000000..e5e878a8 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-05-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-06-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-06-d.png new file mode 100644 index 00000000..e03fc9dd Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-06-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-07-d.png 
b/fr/.gitbook/assets/agentflowv2/darkmode/v2-07-d.png new file mode 100644 index 00000000..ead17d73 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-07-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-08-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-08-d.png new file mode 100644 index 00000000..9ff5504c Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-08-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-09-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-09-d.png new file mode 100644 index 00000000..1beccf66 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-09-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-10-d (1).png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-10-d (1).png new file mode 100644 index 00000000..de3c1573 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-10-d (1).png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-10-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-10-d.png new file mode 100644 index 00000000..48e713b3 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-10-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-11-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-11-d.png new file mode 100644 index 00000000..b08ed99d Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-11-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-12-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-12-d.png new file mode 100644 index 00000000..9afd9f5e Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-12-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-13-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-13-d.png new file mode 100644 index 00000000..2e337950 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-13-d.png differ diff --git 
a/fr/.gitbook/assets/agentflowv2/darkmode/v2-14-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-14-d.png new file mode 100644 index 00000000..0fc8c181 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-14-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-15-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-15-d.png new file mode 100644 index 00000000..243083db Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-15-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-16-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-16-d.png new file mode 100644 index 00000000..092f9159 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-16-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-17-d.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-17-d.png new file mode 100644 index 00000000..43fbcc43 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-17-d.png differ diff --git a/fr/.gitbook/assets/agentflowv2/darkmode/v2-18-l.png b/fr/.gitbook/assets/agentflowv2/darkmode/v2-18-l.png new file mode 100644 index 00000000..e5a23180 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/darkmode/v2-18-l.png differ diff --git a/fr/.gitbook/assets/agentflowv2/patterns.png b/fr/.gitbook/assets/agentflowv2/patterns.png new file mode 100644 index 00000000..b9055b37 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/patterns.png differ diff --git a/fr/.gitbook/assets/agentflowv2/state.png b/fr/.gitbook/assets/agentflowv2/state.png new file mode 100644 index 00000000..5c754fef Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/state.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-01.png b/fr/.gitbook/assets/agentflowv2/v2-01.png new file mode 100644 index 00000000..2ca998d2 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-01.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-02.png 
b/fr/.gitbook/assets/agentflowv2/v2-02.png new file mode 100644 index 00000000..e0800742 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-02.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-03.png b/fr/.gitbook/assets/agentflowv2/v2-03.png new file mode 100644 index 00000000..c86748ca Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-03.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-04.png b/fr/.gitbook/assets/agentflowv2/v2-04.png new file mode 100644 index 00000000..c5a32299 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-04.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-05.png b/fr/.gitbook/assets/agentflowv2/v2-05.png new file mode 100644 index 00000000..1d6641c6 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-05.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-06.png b/fr/.gitbook/assets/agentflowv2/v2-06.png new file mode 100644 index 00000000..feddda3f Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-06.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-07.png b/fr/.gitbook/assets/agentflowv2/v2-07.png new file mode 100644 index 00000000..5cb4edc6 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-07.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-08.png b/fr/.gitbook/assets/agentflowv2/v2-08.png new file mode 100644 index 00000000..5b567b55 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-08.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-09.png b/fr/.gitbook/assets/agentflowv2/v2-09.png new file mode 100644 index 00000000..f39c5e42 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-09.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-10.png b/fr/.gitbook/assets/agentflowv2/v2-10.png new file mode 100644 index 00000000..57c7eeec Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-10.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-11.png 
b/fr/.gitbook/assets/agentflowv2/v2-11.png new file mode 100644 index 00000000..f403219f Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-11.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-12.png b/fr/.gitbook/assets/agentflowv2/v2-12.png new file mode 100644 index 00000000..0ac5c798 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-12.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-13.png b/fr/.gitbook/assets/agentflowv2/v2-13.png new file mode 100644 index 00000000..ed07e1c4 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-13.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-14.png b/fr/.gitbook/assets/agentflowv2/v2-14.png new file mode 100644 index 00000000..1caae4f5 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-14.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-15.png b/fr/.gitbook/assets/agentflowv2/v2-15.png new file mode 100644 index 00000000..7e2d4961 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-15.png differ diff --git a/fr/.gitbook/assets/agentflowv2/v2-all-nodes.png b/fr/.gitbook/assets/agentflowv2/v2-all-nodes.png new file mode 100644 index 00000000..c20ed065 Binary files /dev/null and b/fr/.gitbook/assets/agentflowv2/v2-all-nodes.png differ diff --git a/fr/.gitbook/assets/analytic-1 (1).webp b/fr/.gitbook/assets/analytic-1 (1).webp new file mode 100644 index 00000000..c4693d4d Binary files /dev/null and b/fr/.gitbook/assets/analytic-1 (1).webp differ diff --git a/fr/.gitbook/assets/analytic-1.webp b/fr/.gitbook/assets/analytic-1.webp new file mode 100644 index 00000000..c4693d4d Binary files /dev/null and b/fr/.gitbook/assets/analytic-1.webp differ diff --git a/fr/.gitbook/assets/analytic-2.png b/fr/.gitbook/assets/analytic-2.png new file mode 100644 index 00000000..93e50237 Binary files /dev/null and b/fr/.gitbook/assets/analytic-2.png differ diff --git a/fr/.gitbook/assets/arize/arize-1.png b/fr/.gitbook/assets/arize/arize-1.png new file 
mode 100644 index 00000000..43f9f284 Binary files /dev/null and b/fr/.gitbook/assets/arize/arize-1.png differ diff --git a/fr/.gitbook/assets/arize/arize-2.png b/fr/.gitbook/assets/arize/arize-2.png new file mode 100644 index 00000000..27c93cbe Binary files /dev/null and b/fr/.gitbook/assets/arize/arize-2.png differ diff --git a/fr/.gitbook/assets/arize/arize-3.png b/fr/.gitbook/assets/arize/arize-3.png new file mode 100644 index 00000000..bfc69837 Binary files /dev/null and b/fr/.gitbook/assets/arize/arize-3.png differ diff --git a/fr/.gitbook/assets/azure/azure-chatopenai/1.png b/fr/.gitbook/assets/azure/azure-chatopenai/1.png new file mode 100644 index 00000000..acc35368 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-chatopenai/1.png differ diff --git a/fr/.gitbook/assets/azure/azure-chatopenai/2.png b/fr/.gitbook/assets/azure/azure-chatopenai/2.png new file mode 100644 index 00000000..372177be Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-chatopenai/2.png differ diff --git a/fr/.gitbook/assets/azure/azure-chatopenai/3.png b/fr/.gitbook/assets/azure/azure-chatopenai/3.png new file mode 100644 index 00000000..7d887c79 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-chatopenai/3.png differ diff --git a/fr/.gitbook/assets/azure/azure-chatopenai/4.png b/fr/.gitbook/assets/azure/azure-chatopenai/4.png new file mode 100644 index 00000000..03792722 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-chatopenai/4.png differ diff --git a/fr/.gitbook/assets/azure/azure-chatopenai/5.png b/fr/.gitbook/assets/azure/azure-chatopenai/5.png new file mode 100644 index 00000000..d53b56b1 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-chatopenai/5.png differ diff --git a/fr/.gitbook/assets/azure/azure-general/1.png b/fr/.gitbook/assets/azure/azure-general/1.png new file mode 100644 index 00000000..72057c9d Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-general/1.png differ diff --git 
a/fr/.gitbook/assets/azure/azure-general/2.png b/fr/.gitbook/assets/azure/azure-general/2.png new file mode 100644 index 00000000..7b13de56 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-general/2.png differ diff --git a/fr/.gitbook/assets/azure/azure-general/3.png b/fr/.gitbook/assets/azure/azure-general/3.png new file mode 100644 index 00000000..ca86df36 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-general/3.png differ diff --git a/fr/.gitbook/assets/azure/azure-general/4.png b/fr/.gitbook/assets/azure/azure-general/4.png new file mode 100644 index 00000000..1ab63c85 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-general/4.png differ diff --git a/fr/.gitbook/assets/azure/azure-general/5.png b/fr/.gitbook/assets/azure/azure-general/5.png new file mode 100644 index 00000000..ba561ad0 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-general/5.png differ diff --git a/fr/.gitbook/assets/azure/azure-openai-embeddings/1.png b/fr/.gitbook/assets/azure/azure-openai-embeddings/1.png new file mode 100644 index 00000000..e2cba6ab Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-openai-embeddings/1.png differ diff --git a/fr/.gitbook/assets/azure/azure-openai-embeddings/2.png b/fr/.gitbook/assets/azure/azure-openai-embeddings/2.png new file mode 100644 index 00000000..146bef6f Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-openai-embeddings/2.png differ diff --git a/fr/.gitbook/assets/azure/azure-openai-embeddings/3.png b/fr/.gitbook/assets/azure/azure-openai-embeddings/3.png new file mode 100644 index 00000000..bfca03d6 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-openai-embeddings/3.png differ diff --git a/fr/.gitbook/assets/azure/azure-openai-embeddings/4.png b/fr/.gitbook/assets/azure/azure-openai-embeddings/4.png new file mode 100644 index 00000000..39f7eaa4 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-openai-embeddings/4.png differ diff --git 
a/fr/.gitbook/assets/azure/azure-openai-embeddings/5.png b/fr/.gitbook/assets/azure/azure-openai-embeddings/5.png new file mode 100644 index 00000000..a6a0b630 Binary files /dev/null and b/fr/.gitbook/assets/azure/azure-openai-embeddings/5.png differ diff --git a/fr/.gitbook/assets/azure/deployment/1.png b/fr/.gitbook/assets/azure/deployment/1.png new file mode 100644 index 00000000..bcf31620 Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/1.png differ diff --git a/fr/.gitbook/assets/azure/deployment/2.png b/fr/.gitbook/assets/azure/deployment/2.png new file mode 100644 index 00000000..ea7faa2b Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/2.png differ diff --git a/fr/.gitbook/assets/azure/deployment/3.png b/fr/.gitbook/assets/azure/deployment/3.png new file mode 100644 index 00000000..b873f564 Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/3.png differ diff --git a/fr/.gitbook/assets/azure/deployment/4.png b/fr/.gitbook/assets/azure/deployment/4.png new file mode 100644 index 00000000..d2c2dac7 Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/4.png differ diff --git a/fr/.gitbook/assets/azure/deployment/5.png b/fr/.gitbook/assets/azure/deployment/5.png new file mode 100644 index 00000000..98a50e18 Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/5.png differ diff --git a/fr/.gitbook/assets/azure/deployment/6.png b/fr/.gitbook/assets/azure/deployment/6.png new file mode 100644 index 00000000..af3fe0dd Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/6.png differ diff --git a/fr/.gitbook/assets/azure/deployment/7.png b/fr/.gitbook/assets/azure/deployment/7.png new file mode 100644 index 00000000..6412cab6 Binary files /dev/null and b/fr/.gitbook/assets/azure/deployment/7.png differ diff --git a/fr/.gitbook/assets/azure/deployment/8.png b/fr/.gitbook/assets/azure/deployment/8.png new file mode 100644 index 00000000..9593bdd1 Binary files /dev/null and 
b/fr/.gitbook/assets/azure/deployment/8.png differ diff --git a/fr/.gitbook/assets/chat-prompt-template-file-attachment.jpg b/fr/.gitbook/assets/chat-prompt-template-file-attachment.jpg new file mode 100644 index 00000000..267aaead Binary files /dev/null and b/fr/.gitbook/assets/chat-prompt-template-file-attachment.jpg differ diff --git a/fr/.gitbook/assets/chatcometapi_node.png b/fr/.gitbook/assets/chatcometapi_node.png new file mode 100644 index 00000000..316f4ec6 Binary files /dev/null and b/fr/.gitbook/assets/chatcometapi_node.png differ diff --git a/fr/.gitbook/assets/conditions.png b/fr/.gitbook/assets/conditions.png new file mode 100644 index 00000000..39eb4e60 Binary files /dev/null and b/fr/.gitbook/assets/conditions.png differ diff --git a/fr/.gitbook/assets/contextengineering1.png b/fr/.gitbook/assets/contextengineering1.png new file mode 100644 index 00000000..deb6c93a Binary files /dev/null and b/fr/.gitbook/assets/contextengineering1.png differ diff --git a/fr/.gitbook/assets/couchbase_1.png b/fr/.gitbook/assets/couchbase_1.png new file mode 100644 index 00000000..84b8a851 Binary files /dev/null and b/fr/.gitbook/assets/couchbase_1.png differ diff --git a/fr/.gitbook/assets/couchbase_2.png b/fr/.gitbook/assets/couchbase_2.png new file mode 100644 index 00000000..ff321214 Binary files /dev/null and b/fr/.gitbook/assets/couchbase_2.png differ diff --git a/fr/.gitbook/assets/couchbase_3.png b/fr/.gitbook/assets/couchbase_3.png new file mode 100644 index 00000000..4d9547d8 Binary files /dev/null and b/fr/.gitbook/assets/couchbase_3.png differ diff --git a/fr/.gitbook/assets/couchbase_4.png b/fr/.gitbook/assets/couchbase_4.png new file mode 100644 index 00000000..77365936 Binary files /dev/null and b/fr/.gitbook/assets/couchbase_4.png differ diff --git a/fr/.gitbook/assets/dark.png b/fr/.gitbook/assets/dark.png new file mode 100644 index 00000000..2c3b0361 Binary files /dev/null and b/fr/.gitbook/assets/dark.png differ diff --git 
a/fr/.gitbook/assets/dastore002.png b/fr/.gitbook/assets/dastore002.png new file mode 100644 index 00000000..344a1377 Binary files /dev/null and b/fr/.gitbook/assets/dastore002.png differ diff --git a/fr/.gitbook/assets/dastore003.png b/fr/.gitbook/assets/dastore003.png new file mode 100644 index 00000000..3a7b295c Binary files /dev/null and b/fr/.gitbook/assets/dastore003.png differ diff --git a/fr/.gitbook/assets/dastore004.png b/fr/.gitbook/assets/dastore004.png new file mode 100644 index 00000000..4fde8257 Binary files /dev/null and b/fr/.gitbook/assets/dastore004.png differ diff --git a/fr/.gitbook/assets/dastore005.png b/fr/.gitbook/assets/dastore005.png new file mode 100644 index 00000000..f6d9375f Binary files /dev/null and b/fr/.gitbook/assets/dastore005.png differ diff --git a/fr/.gitbook/assets/dastore006.png b/fr/.gitbook/assets/dastore006.png new file mode 100644 index 00000000..19f16116 Binary files /dev/null and b/fr/.gitbook/assets/dastore006.png differ diff --git a/fr/.gitbook/assets/dastore007.png b/fr/.gitbook/assets/dastore007.png new file mode 100644 index 00000000..b6723d76 Binary files /dev/null and b/fr/.gitbook/assets/dastore007.png differ diff --git a/fr/.gitbook/assets/dastore009.png b/fr/.gitbook/assets/dastore009.png new file mode 100644 index 00000000..ff7e8ee6 Binary files /dev/null and b/fr/.gitbook/assets/dastore009.png differ diff --git a/fr/.gitbook/assets/dastore010.png b/fr/.gitbook/assets/dastore010.png new file mode 100644 index 00000000..f1af2b19 Binary files /dev/null and b/fr/.gitbook/assets/dastore010.png differ diff --git a/fr/.gitbook/assets/dastore011.png b/fr/.gitbook/assets/dastore011.png new file mode 100644 index 00000000..76f34028 Binary files /dev/null and b/fr/.gitbook/assets/dastore011.png differ diff --git a/fr/.gitbook/assets/dastore013.png b/fr/.gitbook/assets/dastore013.png new file mode 100644 index 00000000..ff23b34f Binary files /dev/null and b/fr/.gitbook/assets/dastore013.png differ diff --git 
a/fr/.gitbook/assets/ds01.png b/fr/.gitbook/assets/ds01.png new file mode 100644 index 00000000..f367451a Binary files /dev/null and b/fr/.gitbook/assets/ds01.png differ diff --git a/fr/.gitbook/assets/ds02.png b/fr/.gitbook/assets/ds02.png new file mode 100644 index 00000000..1d35e852 Binary files /dev/null and b/fr/.gitbook/assets/ds02.png differ diff --git a/fr/.gitbook/assets/ds03.png b/fr/.gitbook/assets/ds03.png new file mode 100644 index 00000000..3c7baf36 Binary files /dev/null and b/fr/.gitbook/assets/ds03.png differ diff --git a/fr/.gitbook/assets/ds04.png b/fr/.gitbook/assets/ds04.png new file mode 100644 index 00000000..09627178 Binary files /dev/null and b/fr/.gitbook/assets/ds04.png differ diff --git a/fr/.gitbook/assets/ds05.png b/fr/.gitbook/assets/ds05.png new file mode 100644 index 00000000..11aa1b97 Binary files /dev/null and b/fr/.gitbook/assets/ds05.png differ diff --git a/fr/.gitbook/assets/ds06.png b/fr/.gitbook/assets/ds06.png new file mode 100644 index 00000000..a0079416 Binary files /dev/null and b/fr/.gitbook/assets/ds06.png differ diff --git a/fr/.gitbook/assets/ds07.png b/fr/.gitbook/assets/ds07.png new file mode 100644 index 00000000..ed6d4a98 Binary files /dev/null and b/fr/.gitbook/assets/ds07.png differ diff --git a/fr/.gitbook/assets/ds08.png b/fr/.gitbook/assets/ds08.png new file mode 100644 index 00000000..71685a91 Binary files /dev/null and b/fr/.gitbook/assets/ds08.png differ diff --git a/fr/.gitbook/assets/ds09.png b/fr/.gitbook/assets/ds09.png new file mode 100644 index 00000000..6fbb54a8 Binary files /dev/null and b/fr/.gitbook/assets/ds09.png differ diff --git a/fr/.gitbook/assets/ds10.png b/fr/.gitbook/assets/ds10.png new file mode 100644 index 00000000..1d02a2bd Binary files /dev/null and b/fr/.gitbook/assets/ds10.png differ diff --git a/fr/.gitbook/assets/ds11.png b/fr/.gitbook/assets/ds11.png new file mode 100644 index 00000000..24deedee Binary files /dev/null and b/fr/.gitbook/assets/ds11.png differ diff --git 
a/fr/.gitbook/assets/ds12.png b/fr/.gitbook/assets/ds12.png new file mode 100644 index 00000000..0cf27b10 Binary files /dev/null and b/fr/.gitbook/assets/ds12.png differ diff --git a/fr/.gitbook/assets/ds13.png b/fr/.gitbook/assets/ds13.png new file mode 100644 index 00000000..7d74609e Binary files /dev/null and b/fr/.gitbook/assets/ds13.png differ diff --git a/fr/.gitbook/assets/ds15.png b/fr/.gitbook/assets/ds15.png new file mode 100644 index 00000000..9026ab88 Binary files /dev/null and b/fr/.gitbook/assets/ds15.png differ diff --git a/fr/.gitbook/assets/elastic1.png b/fr/.gitbook/assets/elastic1.png new file mode 100644 index 00000000..be5fe37b Binary files /dev/null and b/fr/.gitbook/assets/elastic1.png differ diff --git a/fr/.gitbook/assets/elastic10.png b/fr/.gitbook/assets/elastic10.png new file mode 100644 index 00000000..8ffc19b4 Binary files /dev/null and b/fr/.gitbook/assets/elastic10.png differ diff --git a/fr/.gitbook/assets/elastic11.png b/fr/.gitbook/assets/elastic11.png new file mode 100644 index 00000000..06307d7c Binary files /dev/null and b/fr/.gitbook/assets/elastic11.png differ diff --git a/fr/.gitbook/assets/elastic12.png b/fr/.gitbook/assets/elastic12.png new file mode 100644 index 00000000..546f772f Binary files /dev/null and b/fr/.gitbook/assets/elastic12.png differ diff --git a/fr/.gitbook/assets/elastic13.png b/fr/.gitbook/assets/elastic13.png new file mode 100644 index 00000000..3ed8b9b3 Binary files /dev/null and b/fr/.gitbook/assets/elastic13.png differ diff --git a/fr/.gitbook/assets/elastic2.png b/fr/.gitbook/assets/elastic2.png new file mode 100644 index 00000000..1e562041 Binary files /dev/null and b/fr/.gitbook/assets/elastic2.png differ diff --git a/fr/.gitbook/assets/elastic3.png b/fr/.gitbook/assets/elastic3.png new file mode 100644 index 00000000..427e6221 Binary files /dev/null and b/fr/.gitbook/assets/elastic3.png differ diff --git a/fr/.gitbook/assets/elastic4.png b/fr/.gitbook/assets/elastic4.png new file mode 100644 
index 00000000..7af0ed03 Binary files /dev/null and b/fr/.gitbook/assets/elastic4.png differ diff --git a/fr/.gitbook/assets/elastic5.png b/fr/.gitbook/assets/elastic5.png new file mode 100644 index 00000000..5c56ee8f Binary files /dev/null and b/fr/.gitbook/assets/elastic5.png differ diff --git a/fr/.gitbook/assets/elastic6.png b/fr/.gitbook/assets/elastic6.png new file mode 100644 index 00000000..8cf14213 Binary files /dev/null and b/fr/.gitbook/assets/elastic6.png differ diff --git a/fr/.gitbook/assets/elastic7.png b/fr/.gitbook/assets/elastic7.png new file mode 100644 index 00000000..e5a960b9 Binary files /dev/null and b/fr/.gitbook/assets/elastic7.png differ diff --git a/fr/.gitbook/assets/elastic8.png b/fr/.gitbook/assets/elastic8.png new file mode 100644 index 00000000..e6a04e8a Binary files /dev/null and b/fr/.gitbook/assets/elastic8.png differ diff --git a/fr/.gitbook/assets/elastic9.png b/fr/.gitbook/assets/elastic9.png new file mode 100644 index 00000000..25533262 Binary files /dev/null and b/fr/.gitbook/assets/elastic9.png differ diff --git a/fr/.gitbook/assets/flowise.gif b/fr/.gitbook/assets/flowise.gif new file mode 100644 index 00000000..07101bc2 Binary files /dev/null and b/fr/.gitbook/assets/flowise.gif differ diff --git a/fr/.gitbook/assets/gcp/1.png b/fr/.gitbook/assets/gcp/1.png new file mode 100644 index 00000000..5567b6e4 Binary files /dev/null and b/fr/.gitbook/assets/gcp/1.png differ diff --git a/fr/.gitbook/assets/gcp/2.png b/fr/.gitbook/assets/gcp/2.png new file mode 100644 index 00000000..38e69e26 Binary files /dev/null and b/fr/.gitbook/assets/gcp/2.png differ diff --git a/fr/.gitbook/assets/gcp/3.png b/fr/.gitbook/assets/gcp/3.png new file mode 100644 index 00000000..31a74b57 Binary files /dev/null and b/fr/.gitbook/assets/gcp/3.png differ diff --git a/fr/.gitbook/assets/gcp/4.png b/fr/.gitbook/assets/gcp/4.png new file mode 100644 index 00000000..9da60626 Binary files /dev/null and b/fr/.gitbook/assets/gcp/4.png differ diff --git 
a/fr/.gitbook/assets/gcp/5.png b/fr/.gitbook/assets/gcp/5.png new file mode 100644 index 00000000..b261f7dc Binary files /dev/null and b/fr/.gitbook/assets/gcp/5.png differ diff --git a/fr/.gitbook/assets/gcp_credential/add_key.png b/fr/.gitbook/assets/gcp_credential/add_key.png new file mode 100644 index 00000000..f5bc8779 Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/add_key.png differ diff --git a/fr/.gitbook/assets/gcp_credential/create_credential.png b/fr/.gitbook/assets/gcp_credential/create_credential.png new file mode 100644 index 00000000..2f971263 Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/create_credential.png differ diff --git a/fr/.gitbook/assets/gcp_credential/create_key.png b/fr/.gitbook/assets/gcp_credential/create_key.png new file mode 100644 index 00000000..f3ae424f Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/create_key.png differ diff --git a/fr/.gitbook/assets/gcp_credential/create_service_account.png b/fr/.gitbook/assets/gcp_credential/create_service_account.png new file mode 100644 index 00000000..cf02cf12 Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/create_service_account.png differ diff --git a/fr/.gitbook/assets/gcp_credential/google_vertex_auth.png b/fr/.gitbook/assets/gcp_credential/google_vertex_auth.png new file mode 100644 index 00000000..8d850444 Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/google_vertex_auth.png differ diff --git a/fr/.gitbook/assets/gcp_credential/register_credential.png b/fr/.gitbook/assets/gcp_credential/register_credential.png new file mode 100644 index 00000000..4eb2ee58 Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/register_credential.png differ diff --git a/fr/.gitbook/assets/gcp_credential/select_role.png b/fr/.gitbook/assets/gcp_credential/select_role.png new file mode 100644 index 00000000..d8eb25fa Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/select_role.png differ diff --git 
a/fr/.gitbook/assets/gcp_credential/vertex_AI_enable.png b/fr/.gitbook/assets/gcp_credential/vertex_AI_enable.png new file mode 100644 index 00000000..7fa3cc19 Binary files /dev/null and b/fr/.gitbook/assets/gcp_credential/vertex_AI_enable.png differ diff --git a/fr/.gitbook/assets/giphy.gif b/fr/.gitbook/assets/giphy.gif new file mode 100644 index 00000000..67b89396 Binary files /dev/null and b/fr/.gitbook/assets/giphy.gif differ diff --git a/fr/.gitbook/assets/google_ai/1.png b/fr/.gitbook/assets/google_ai/1.png new file mode 100644 index 00000000..986cbb58 Binary files /dev/null and b/fr/.gitbook/assets/google_ai/1.png differ diff --git a/fr/.gitbook/assets/google_ai/2.png b/fr/.gitbook/assets/google_ai/2.png new file mode 100644 index 00000000..eb38783f Binary files /dev/null and b/fr/.gitbook/assets/google_ai/2.png differ diff --git a/fr/.gitbook/assets/google_ai/3.png b/fr/.gitbook/assets/google_ai/3.png new file mode 100644 index 00000000..b9317a13 Binary files /dev/null and b/fr/.gitbook/assets/google_ai/3.png differ diff --git a/fr/.gitbook/assets/google_ai/4.png b/fr/.gitbook/assets/google_ai/4.png new file mode 100644 index 00000000..a4892c89 Binary files /dev/null and b/fr/.gitbook/assets/google_ai/4.png differ diff --git a/fr/.gitbook/assets/google_ai/5.png b/fr/.gitbook/assets/google_ai/5.png new file mode 100644 index 00000000..0a98c385 Binary files /dev/null and b/fr/.gitbook/assets/google_ai/5.png differ diff --git a/fr/.gitbook/assets/google_ai/6.png b/fr/.gitbook/assets/google_ai/6.png new file mode 100644 index 00000000..8147cce7 Binary files /dev/null and b/fr/.gitbook/assets/google_ai/6.png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 
00000000..a526cf08 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..3bb4b8f6 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..34c6a450 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..3d01ff6e Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..e4476d4a Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..3d7f3708 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..acca8811 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..dbd0aa75 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1).png new file mode 100644 index 00000000..0ee4d8a6 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..6ad2816a Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..6e93ba22 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..96a22bd6 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..fdd17721 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..b3871bd9 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png new file mode 100644 index 00000000..01d34008 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..7ef75937 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..9dd4614e Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..9020bb70 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..84a0ce96 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..340f570b Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c1b7ba8a Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..efdbc782 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..037d1f51 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..a623e97b Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..bcd88739 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..b082f202 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..f0b230ff Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..a15352be Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..7a74d953 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..f215b2ae Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..4eb0dd44 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..def92321 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..22b7d480 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..cc83af61 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..4530edde Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) 
(1) (1) (1) (1) (2).png new file mode 100644 index 00000000..68be3658 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ac18f752 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png new file mode 100644 index 00000000..3c94a1dc Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..bc1f831a Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..b9aa1d82 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..15be701e Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..5338dc1f Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (2).png 
b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..4dfcd5b3 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..6ac0d59c Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..558b4cad Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (1).png new file mode 100644 index 00000000..d092f8d7 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..8e38fc1e Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (1) (2).png new file mode 100644 index 00000000..803c3975 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (1).png new file mode 100644 index 00000000..cca977e9 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (2) (1).png b/fr/.gitbook/assets/image (1) (1) (2) (1).png new file mode 100644 index 00000000..59122a4f Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (2).png b/fr/.gitbook/assets/image (1) (1) (2).png new file mode 100644 index 
00000000..ce87876b Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (3) (1) (1).png b/fr/.gitbook/assets/image (1) (1) (3) (1) (1).png new file mode 100644 index 00000000..9188b08c Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (3) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (3) (1).png b/fr/.gitbook/assets/image (1) (1) (3) (1).png new file mode 100644 index 00000000..a669684b Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (3) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (1) (3).png b/fr/.gitbook/assets/image (1) (1) (3).png new file mode 100644 index 00000000..a7aa0c6f Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1) (3).png differ diff --git a/fr/.gitbook/assets/image (1) (1).png b/fr/.gitbook/assets/image (1) (1).png new file mode 100644 index 00000000..8c466dad Binary files /dev/null and b/fr/.gitbook/assets/image (1) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (2) (1).png b/fr/.gitbook/assets/image (1) (2) (1).png new file mode 100644 index 00000000..ebba846f Binary files /dev/null and b/fr/.gitbook/assets/image (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (1) (2).png b/fr/.gitbook/assets/image (1) (2).png new file mode 100644 index 00000000..42228062 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (2).png differ diff --git a/fr/.gitbook/assets/image (1) (3).png b/fr/.gitbook/assets/image (1) (3).png new file mode 100644 index 00000000..d8512040 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (3).png differ diff --git a/fr/.gitbook/assets/image (1) (4).png b/fr/.gitbook/assets/image (1) (4).png new file mode 100644 index 00000000..f5860f26 Binary files /dev/null and b/fr/.gitbook/assets/image (1) (4).png differ diff --git a/fr/.gitbook/assets/image (1) (5).png b/fr/.gitbook/assets/image (1) (5).png new file mode 100644 index 00000000..775a46e5 Binary files 
/dev/null and b/fr/.gitbook/assets/image (1) (5).png differ diff --git a/fr/.gitbook/assets/image (1).png b/fr/.gitbook/assets/image (1).png new file mode 100644 index 00000000..c80dfd58 Binary files /dev/null and b/fr/.gitbook/assets/image (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..2627f3a5 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..619c9b67 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..d5db1644 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..0e574164 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..841850eb Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (1).png new file mode 100644 index 00000000..84c68e31 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (10) (1) (1) (1) (2).png new file mode 100644 index 
00000000..f4565750 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (1).png b/fr/.gitbook/assets/image (10) (1) (1) (1).png new file mode 100644 index 00000000..e4cfae74 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1) (2).png b/fr/.gitbook/assets/image (10) (1) (1) (2).png new file mode 100644 index 00000000..b379ec18 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (1).png b/fr/.gitbook/assets/image (10) (1) (1).png new file mode 100644 index 00000000..0a3f033b Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (1) (2).png b/fr/.gitbook/assets/image (10) (1) (2).png new file mode 100644 index 00000000..d456aea7 Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (10) (1).png b/fr/.gitbook/assets/image (10) (1).png new file mode 100644 index 00000000..7f8047af Binary files /dev/null and b/fr/.gitbook/assets/image (10) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (2) (1).png b/fr/.gitbook/assets/image (10) (2) (1).png new file mode 100644 index 00000000..7f52f55a Binary files /dev/null and b/fr/.gitbook/assets/image (10) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (10) (2).png b/fr/.gitbook/assets/image (10) (2).png new file mode 100644 index 00000000..32f0646b Binary files /dev/null and b/fr/.gitbook/assets/image (10) (2).png differ diff --git a/fr/.gitbook/assets/image (10) (3).png b/fr/.gitbook/assets/image (10) (3).png new file mode 100644 index 00000000..ac53208d Binary files /dev/null and b/fr/.gitbook/assets/image (10) (3).png differ diff --git a/fr/.gitbook/assets/image (10).png b/fr/.gitbook/assets/image (10).png new file mode 100644 index 
00000000..dd905ffd Binary files /dev/null and b/fr/.gitbook/assets/image (10).png differ diff --git a/fr/.gitbook/assets/image (100).png b/fr/.gitbook/assets/image (100).png new file mode 100644 index 00000000..1030565b Binary files /dev/null and b/fr/.gitbook/assets/image (100).png differ diff --git a/fr/.gitbook/assets/image (101).png b/fr/.gitbook/assets/image (101).png new file mode 100644 index 00000000..43503a93 Binary files /dev/null and b/fr/.gitbook/assets/image (101).png differ diff --git a/fr/.gitbook/assets/image (102).png b/fr/.gitbook/assets/image (102).png new file mode 100644 index 00000000..b4bb4cd9 Binary files /dev/null and b/fr/.gitbook/assets/image (102).png differ diff --git a/fr/.gitbook/assets/image (103).png b/fr/.gitbook/assets/image (103).png new file mode 100644 index 00000000..661f4981 Binary files /dev/null and b/fr/.gitbook/assets/image (103).png differ diff --git a/fr/.gitbook/assets/image (104).png b/fr/.gitbook/assets/image (104).png new file mode 100644 index 00000000..72d1b916 Binary files /dev/null and b/fr/.gitbook/assets/image (104).png differ diff --git a/fr/.gitbook/assets/image (105).png b/fr/.gitbook/assets/image (105).png new file mode 100644 index 00000000..f39017cc Binary files /dev/null and b/fr/.gitbook/assets/image (105).png differ diff --git a/fr/.gitbook/assets/image (106).png b/fr/.gitbook/assets/image (106).png new file mode 100644 index 00000000..b74bee50 Binary files /dev/null and b/fr/.gitbook/assets/image (106).png differ diff --git a/fr/.gitbook/assets/image (107).png b/fr/.gitbook/assets/image (107).png new file mode 100644 index 00000000..17453be4 Binary files /dev/null and b/fr/.gitbook/assets/image (107).png differ diff --git a/fr/.gitbook/assets/image (108).png b/fr/.gitbook/assets/image (108).png new file mode 100644 index 00000000..b0b9abe9 Binary files /dev/null and b/fr/.gitbook/assets/image (108).png differ diff --git a/fr/.gitbook/assets/image (109).png b/fr/.gitbook/assets/image (109).png new 
file mode 100644 index 00000000..8a34f838 Binary files /dev/null and b/fr/.gitbook/assets/image (109).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..8e74d8d2 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..39f86040 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..d040dbe8 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..a1f50e01 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..5d08d059 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1) (1).png new file mode 100644 index 00000000..5288c1c5 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1) (1).png new file mode 100644 index 00000000..06b0a6a6 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) 
(1).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1) (2).png b/fr/.gitbook/assets/image (11) (1) (1) (2).png new file mode 100644 index 00000000..4c68c399 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (11) (1) (1).png b/fr/.gitbook/assets/image (11) (1) (1).png new file mode 100644 index 00000000..181ee0d4 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (1).png b/fr/.gitbook/assets/image (11) (1).png new file mode 100644 index 00000000..8e191be9 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (1).png differ diff --git a/fr/.gitbook/assets/image (11) (2).png b/fr/.gitbook/assets/image (11) (2).png new file mode 100644 index 00000000..addf1166 Binary files /dev/null and b/fr/.gitbook/assets/image (11) (2).png differ diff --git a/fr/.gitbook/assets/image (11).png b/fr/.gitbook/assets/image (11).png new file mode 100644 index 00000000..d4a08105 Binary files /dev/null and b/fr/.gitbook/assets/image (11).png differ diff --git a/fr/.gitbook/assets/image (110).png b/fr/.gitbook/assets/image (110).png new file mode 100644 index 00000000..350efaa5 Binary files /dev/null and b/fr/.gitbook/assets/image (110).png differ diff --git a/fr/.gitbook/assets/image (111).png b/fr/.gitbook/assets/image (111).png new file mode 100644 index 00000000..62705295 Binary files /dev/null and b/fr/.gitbook/assets/image (111).png differ diff --git a/fr/.gitbook/assets/image (112).png b/fr/.gitbook/assets/image (112).png new file mode 100644 index 00000000..d0d5a7ad Binary files /dev/null and b/fr/.gitbook/assets/image (112).png differ diff --git a/fr/.gitbook/assets/image (113).png b/fr/.gitbook/assets/image (113).png new file mode 100644 index 00000000..36fcbd16 Binary files /dev/null and b/fr/.gitbook/assets/image (113).png differ diff --git a/fr/.gitbook/assets/image (114).png b/fr/.gitbook/assets/image (114).png new file 
mode 100644 index 00000000..5d881f93 Binary files /dev/null and b/fr/.gitbook/assets/image (114).png differ diff --git a/fr/.gitbook/assets/image (115).png b/fr/.gitbook/assets/image (115).png new file mode 100644 index 00000000..29629d57 Binary files /dev/null and b/fr/.gitbook/assets/image (115).png differ diff --git a/fr/.gitbook/assets/image (116).png b/fr/.gitbook/assets/image (116).png new file mode 100644 index 00000000..a060a0a1 Binary files /dev/null and b/fr/.gitbook/assets/image (116).png differ diff --git a/fr/.gitbook/assets/image (117).png b/fr/.gitbook/assets/image (117).png new file mode 100644 index 00000000..32990a7d Binary files /dev/null and b/fr/.gitbook/assets/image (117).png differ diff --git a/fr/.gitbook/assets/image (118).png b/fr/.gitbook/assets/image (118).png new file mode 100644 index 00000000..15ac7fea Binary files /dev/null and b/fr/.gitbook/assets/image (118).png differ diff --git a/fr/.gitbook/assets/image (119).png b/fr/.gitbook/assets/image (119).png new file mode 100644 index 00000000..34cc00e1 Binary files /dev/null and b/fr/.gitbook/assets/image (119).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (12) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ae9b54c3 Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (12) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..5aa164fc Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (12) (1) (1) (1) (1).png new file mode 100644 index 00000000..f93a6122 Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (12) (1) (1) 
(1) (2).png new file mode 100644 index 00000000..4c575dd8 Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1) (1).png b/fr/.gitbook/assets/image (12) (1) (1) (1).png new file mode 100644 index 00000000..ef4f7db1 Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1) (2).png b/fr/.gitbook/assets/image (12) (1) (1) (2).png new file mode 100644 index 00000000..c19a49ea Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (1).png b/fr/.gitbook/assets/image (12) (1) (1).png new file mode 100644 index 00000000..deb4e36b Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (1) (2).png b/fr/.gitbook/assets/image (12) (1) (2).png new file mode 100644 index 00000000..11ba8b6b Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (12) (1).png b/fr/.gitbook/assets/image (12) (1).png new file mode 100644 index 00000000..0160ac3d Binary files /dev/null and b/fr/.gitbook/assets/image (12) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (2) (1).png b/fr/.gitbook/assets/image (12) (2) (1).png new file mode 100644 index 00000000..22f9a6ff Binary files /dev/null and b/fr/.gitbook/assets/image (12) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (12) (2) (2).png b/fr/.gitbook/assets/image (12) (2) (2).png new file mode 100644 index 00000000..edbd4a0c Binary files /dev/null and b/fr/.gitbook/assets/image (12) (2) (2).png differ diff --git a/fr/.gitbook/assets/image (12) (2).png b/fr/.gitbook/assets/image (12) (2).png new file mode 100644 index 00000000..149b5889 Binary files /dev/null and b/fr/.gitbook/assets/image (12) (2).png differ diff --git a/fr/.gitbook/assets/image (12) (3).png 
b/fr/.gitbook/assets/image (12) (3).png new file mode 100644 index 00000000..d3cf907b Binary files /dev/null and b/fr/.gitbook/assets/image (12) (3).png differ diff --git a/fr/.gitbook/assets/image (12).png b/fr/.gitbook/assets/image (12).png new file mode 100644 index 00000000..10c9e4bd Binary files /dev/null and b/fr/.gitbook/assets/image (12).png differ diff --git a/fr/.gitbook/assets/image (120).png b/fr/.gitbook/assets/image (120).png new file mode 100644 index 00000000..67d36adc Binary files /dev/null and b/fr/.gitbook/assets/image (120).png differ diff --git a/fr/.gitbook/assets/image (121).png b/fr/.gitbook/assets/image (121).png new file mode 100644 index 00000000..3a4b62d2 Binary files /dev/null and b/fr/.gitbook/assets/image (121).png differ diff --git a/fr/.gitbook/assets/image (122).png b/fr/.gitbook/assets/image (122).png new file mode 100644 index 00000000..7a519c93 Binary files /dev/null and b/fr/.gitbook/assets/image (122).png differ diff --git a/fr/.gitbook/assets/image (123).png b/fr/.gitbook/assets/image (123).png new file mode 100644 index 00000000..d60b1496 Binary files /dev/null and b/fr/.gitbook/assets/image (123).png differ diff --git a/fr/.gitbook/assets/image (124).png b/fr/.gitbook/assets/image (124).png new file mode 100644 index 00000000..33cfd22c Binary files /dev/null and b/fr/.gitbook/assets/image (124).png differ diff --git a/fr/.gitbook/assets/image (125).png b/fr/.gitbook/assets/image (125).png new file mode 100644 index 00000000..a8713756 Binary files /dev/null and b/fr/.gitbook/assets/image (125).png differ diff --git a/fr/.gitbook/assets/image (126).png b/fr/.gitbook/assets/image (126).png new file mode 100644 index 00000000..9060a484 Binary files /dev/null and b/fr/.gitbook/assets/image (126).png differ diff --git a/fr/.gitbook/assets/image (127).png b/fr/.gitbook/assets/image (127).png new file mode 100644 index 00000000..2b542c81 Binary files /dev/null and b/fr/.gitbook/assets/image (127).png differ diff --git 
a/fr/.gitbook/assets/image (128).png b/fr/.gitbook/assets/image (128).png new file mode 100644 index 00000000..83583e62 Binary files /dev/null and b/fr/.gitbook/assets/image (128).png differ diff --git a/fr/.gitbook/assets/image (129).png b/fr/.gitbook/assets/image (129).png new file mode 100644 index 00000000..9950d727 Binary files /dev/null and b/fr/.gitbook/assets/image (129).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (13) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..eadf03a5 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (13) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..4f2268c3 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (13) (1) (1) (1) (1).png new file mode 100644 index 00000000..8b3effe6 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1) (1).png b/fr/.gitbook/assets/image (13) (1) (1) (1).png new file mode 100644 index 00000000..ba490a8c Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (13) (1) (1) (2) (1).png new file mode 100644 index 00000000..8fb5c8b3 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1) (2).png b/fr/.gitbook/assets/image (13) (1) (1) (2).png new file mode 100644 index 00000000..32204e65 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (13) (1) (1).png b/fr/.gitbook/assets/image (13) (1) (1).png new file mode 
100644 index 00000000..e1fb4fb1 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (1).png b/fr/.gitbook/assets/image (13) (1).png new file mode 100644 index 00000000..4c09c833 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (2) (1).png b/fr/.gitbook/assets/image (13) (2) (1).png new file mode 100644 index 00000000..1d83d9f4 Binary files /dev/null and b/fr/.gitbook/assets/image (13) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (13) (2) (2).png b/fr/.gitbook/assets/image (13) (2) (2).png new file mode 100644 index 00000000..6a43418e Binary files /dev/null and b/fr/.gitbook/assets/image (13) (2) (2).png differ diff --git a/fr/.gitbook/assets/image (13) (2).png b/fr/.gitbook/assets/image (13) (2).png new file mode 100644 index 00000000..070e8fff Binary files /dev/null and b/fr/.gitbook/assets/image (13) (2).png differ diff --git a/fr/.gitbook/assets/image (13).png b/fr/.gitbook/assets/image (13).png new file mode 100644 index 00000000..8435ddea Binary files /dev/null and b/fr/.gitbook/assets/image (13).png differ diff --git a/fr/.gitbook/assets/image (130).png b/fr/.gitbook/assets/image (130).png new file mode 100644 index 00000000..e2955bf0 Binary files /dev/null and b/fr/.gitbook/assets/image (130).png differ diff --git a/fr/.gitbook/assets/image (131).png b/fr/.gitbook/assets/image (131).png new file mode 100644 index 00000000..50dbc679 Binary files /dev/null and b/fr/.gitbook/assets/image (131).png differ diff --git a/fr/.gitbook/assets/image (132).png b/fr/.gitbook/assets/image (132).png new file mode 100644 index 00000000..71ec7568 Binary files /dev/null and b/fr/.gitbook/assets/image (132).png differ diff --git a/fr/.gitbook/assets/image (133).png b/fr/.gitbook/assets/image (133).png new file mode 100644 index 00000000..dba6e212 Binary files /dev/null and b/fr/.gitbook/assets/image (133).png differ diff --git 
a/fr/.gitbook/assets/image (134).png b/fr/.gitbook/assets/image (134).png new file mode 100644 index 00000000..e7b987ce Binary files /dev/null and b/fr/.gitbook/assets/image (134).png differ diff --git a/fr/.gitbook/assets/image (135).png b/fr/.gitbook/assets/image (135).png new file mode 100644 index 00000000..3e096df3 Binary files /dev/null and b/fr/.gitbook/assets/image (135).png differ diff --git a/fr/.gitbook/assets/image (136).png b/fr/.gitbook/assets/image (136).png new file mode 100644 index 00000000..fd3709f5 Binary files /dev/null and b/fr/.gitbook/assets/image (136).png differ diff --git a/fr/.gitbook/assets/image (137).png b/fr/.gitbook/assets/image (137).png new file mode 100644 index 00000000..fe6d18ed Binary files /dev/null and b/fr/.gitbook/assets/image (137).png differ diff --git a/fr/.gitbook/assets/image (138).png b/fr/.gitbook/assets/image (138).png new file mode 100644 index 00000000..714bf944 Binary files /dev/null and b/fr/.gitbook/assets/image (138).png differ diff --git a/fr/.gitbook/assets/image (139).png b/fr/.gitbook/assets/image (139).png new file mode 100644 index 00000000..bde86b49 Binary files /dev/null and b/fr/.gitbook/assets/image (139).png differ diff --git a/fr/.gitbook/assets/image (14) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (14) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..cea9d5b7 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (14) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (14) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..339c68b7 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (14) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (14) (1) (1) (1) (1).png new file mode 100644 index 00000000..03618507 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1) (1) (1) (1).png differ diff --git 
a/fr/.gitbook/assets/image (14) (1) (1) (1).png b/fr/.gitbook/assets/image (14) (1) (1) (1).png new file mode 100644 index 00000000..066a3025 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (14) (1) (1) (2).png b/fr/.gitbook/assets/image (14) (1) (1) (2).png new file mode 100644 index 00000000..0e7c5b04 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (14) (1) (1).png b/fr/.gitbook/assets/image (14) (1) (1).png new file mode 100644 index 00000000..d0f7462f Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (14) (1).png b/fr/.gitbook/assets/image (14) (1).png new file mode 100644 index 00000000..39e225a6 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (1).png differ diff --git a/fr/.gitbook/assets/image (14) (2).png b/fr/.gitbook/assets/image (14) (2).png new file mode 100644 index 00000000..92c99bd7 Binary files /dev/null and b/fr/.gitbook/assets/image (14) (2).png differ diff --git a/fr/.gitbook/assets/image (14).png b/fr/.gitbook/assets/image (14).png new file mode 100644 index 00000000..d52bd4da Binary files /dev/null and b/fr/.gitbook/assets/image (14).png differ diff --git a/fr/.gitbook/assets/image (140).png b/fr/.gitbook/assets/image (140).png new file mode 100644 index 00000000..d3bec65d Binary files /dev/null and b/fr/.gitbook/assets/image (140).png differ diff --git a/fr/.gitbook/assets/image (141).png b/fr/.gitbook/assets/image (141).png new file mode 100644 index 00000000..004a46b3 Binary files /dev/null and b/fr/.gitbook/assets/image (141).png differ diff --git a/fr/.gitbook/assets/image (142).png b/fr/.gitbook/assets/image (142).png new file mode 100644 index 00000000..d814dc41 Binary files /dev/null and b/fr/.gitbook/assets/image (142).png differ diff --git a/fr/.gitbook/assets/image (143).png b/fr/.gitbook/assets/image (143).png new 
file mode 100644 index 00000000..c622470e Binary files /dev/null and b/fr/.gitbook/assets/image (143).png differ diff --git a/fr/.gitbook/assets/image (144).png b/fr/.gitbook/assets/image (144).png new file mode 100644 index 00000000..998565bc Binary files /dev/null and b/fr/.gitbook/assets/image (144).png differ diff --git a/fr/.gitbook/assets/image (145).png b/fr/.gitbook/assets/image (145).png new file mode 100644 index 00000000..907a8ea8 Binary files /dev/null and b/fr/.gitbook/assets/image (145).png differ diff --git a/fr/.gitbook/assets/image (146).png b/fr/.gitbook/assets/image (146).png new file mode 100644 index 00000000..bc69c855 Binary files /dev/null and b/fr/.gitbook/assets/image (146).png differ diff --git a/fr/.gitbook/assets/image (147).png b/fr/.gitbook/assets/image (147).png new file mode 100644 index 00000000..84f102c7 Binary files /dev/null and b/fr/.gitbook/assets/image (147).png differ diff --git a/fr/.gitbook/assets/image (148).png b/fr/.gitbook/assets/image (148).png new file mode 100644 index 00000000..af3d88cb Binary files /dev/null and b/fr/.gitbook/assets/image (148).png differ diff --git a/fr/.gitbook/assets/image (149).png b/fr/.gitbook/assets/image (149).png new file mode 100644 index 00000000..a46fc5de Binary files /dev/null and b/fr/.gitbook/assets/image (149).png differ diff --git a/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c99d558a Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..556cd567 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (15) (1) (1) (1) (1) 
(1).png new file mode 100644 index 00000000..434ea80d Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (15) (1) (1) (1) (1).png new file mode 100644 index 00000000..66a11f53 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (1) (1) (1).png b/fr/.gitbook/assets/image (15) (1) (1) (1).png new file mode 100644 index 00000000..0f639f00 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (1) (1).png b/fr/.gitbook/assets/image (15) (1) (1).png new file mode 100644 index 00000000..888d0db3 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (1).png b/fr/.gitbook/assets/image (15) (1).png new file mode 100644 index 00000000..5bd67829 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (2) (1) (1).png b/fr/.gitbook/assets/image (15) (2) (1) (1).png new file mode 100644 index 00000000..42e43e05 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (2) (1).png b/fr/.gitbook/assets/image (15) (2) (1).png new file mode 100644 index 00000000..3fe5e6ce Binary files /dev/null and b/fr/.gitbook/assets/image (15) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (15) (2) (2).png b/fr/.gitbook/assets/image (15) (2) (2).png new file mode 100644 index 00000000..024b6d8d Binary files /dev/null and b/fr/.gitbook/assets/image (15) (2) (2).png differ diff --git a/fr/.gitbook/assets/image (15) (2).png b/fr/.gitbook/assets/image (15) (2).png new file mode 100644 index 00000000..5f2011f3 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (2).png differ diff --git a/fr/.gitbook/assets/image (15) 
(3).png b/fr/.gitbook/assets/image (15) (3).png new file mode 100644 index 00000000..1b9a5002 Binary files /dev/null and b/fr/.gitbook/assets/image (15) (3).png differ diff --git a/fr/.gitbook/assets/image (15).png b/fr/.gitbook/assets/image (15).png new file mode 100644 index 00000000..21d1d25f Binary files /dev/null and b/fr/.gitbook/assets/image (15).png differ diff --git a/fr/.gitbook/assets/image (150).png b/fr/.gitbook/assets/image (150).png new file mode 100644 index 00000000..bc9a669e Binary files /dev/null and b/fr/.gitbook/assets/image (150).png differ diff --git a/fr/.gitbook/assets/image (151).png b/fr/.gitbook/assets/image (151).png new file mode 100644 index 00000000..756d00fd Binary files /dev/null and b/fr/.gitbook/assets/image (151).png differ diff --git a/fr/.gitbook/assets/image (152).png b/fr/.gitbook/assets/image (152).png new file mode 100644 index 00000000..dd190d70 Binary files /dev/null and b/fr/.gitbook/assets/image (152).png differ diff --git a/fr/.gitbook/assets/image (153).png b/fr/.gitbook/assets/image (153).png new file mode 100644 index 00000000..3b61ca4e Binary files /dev/null and b/fr/.gitbook/assets/image (153).png differ diff --git a/fr/.gitbook/assets/image (154).png b/fr/.gitbook/assets/image (154).png new file mode 100644 index 00000000..dbc8e0bb Binary files /dev/null and b/fr/.gitbook/assets/image (154).png differ diff --git a/fr/.gitbook/assets/image (155).png b/fr/.gitbook/assets/image (155).png new file mode 100644 index 00000000..4c5c95ae Binary files /dev/null and b/fr/.gitbook/assets/image (155).png differ diff --git a/fr/.gitbook/assets/image (156).png b/fr/.gitbook/assets/image (156).png new file mode 100644 index 00000000..79be6075 Binary files /dev/null and b/fr/.gitbook/assets/image (156).png differ diff --git a/fr/.gitbook/assets/image (157).png b/fr/.gitbook/assets/image (157).png new file mode 100644 index 00000000..a81894b8 Binary files /dev/null and b/fr/.gitbook/assets/image (157).png differ diff --git 
a/fr/.gitbook/assets/image (158).png b/fr/.gitbook/assets/image (158).png new file mode 100644 index 00000000..8f1b1956 Binary files /dev/null and b/fr/.gitbook/assets/image (158).png differ diff --git a/fr/.gitbook/assets/image (159).png b/fr/.gitbook/assets/image (159).png new file mode 100644 index 00000000..eda43ab5 Binary files /dev/null and b/fr/.gitbook/assets/image (159).png differ diff --git a/fr/.gitbook/assets/image (16) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (16) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..d3bdadc0 Binary files /dev/null and b/fr/.gitbook/assets/image (16) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (16) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (16) (1) (1) (1) (1).png new file mode 100644 index 00000000..c624b1e0 Binary files /dev/null and b/fr/.gitbook/assets/image (16) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (16) (1) (1) (1).png b/fr/.gitbook/assets/image (16) (1) (1) (1).png new file mode 100644 index 00000000..b082f202 Binary files /dev/null and b/fr/.gitbook/assets/image (16) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (16) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (16) (1) (1) (2) (1).png new file mode 100644 index 00000000..c324cc5f Binary files /dev/null and b/fr/.gitbook/assets/image (16) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (16) (1) (1) (2).png b/fr/.gitbook/assets/image (16) (1) (1) (2).png new file mode 100644 index 00000000..8911e8d8 Binary files /dev/null and b/fr/.gitbook/assets/image (16) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (16) (1) (1).png b/fr/.gitbook/assets/image (16) (1) (1).png new file mode 100644 index 00000000..c7254f23 Binary files /dev/null and b/fr/.gitbook/assets/image (16) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (16) (1).png b/fr/.gitbook/assets/image (16) (1).png new file mode 100644 index 00000000..891b54ca Binary files /dev/null 
and b/fr/.gitbook/assets/image (16) (1).png differ diff --git a/fr/.gitbook/assets/image (16) (2).png b/fr/.gitbook/assets/image (16) (2).png new file mode 100644 index 00000000..e48c7512 Binary files /dev/null and b/fr/.gitbook/assets/image (16) (2).png differ diff --git a/fr/.gitbook/assets/image (16).png b/fr/.gitbook/assets/image (16).png new file mode 100644 index 00000000..9809fd3b Binary files /dev/null and b/fr/.gitbook/assets/image (16).png differ diff --git a/fr/.gitbook/assets/image (160).png b/fr/.gitbook/assets/image (160).png new file mode 100644 index 00000000..6c9bea35 Binary files /dev/null and b/fr/.gitbook/assets/image (160).png differ diff --git a/fr/.gitbook/assets/image (161).png b/fr/.gitbook/assets/image (161).png new file mode 100644 index 00000000..267cc9f1 Binary files /dev/null and b/fr/.gitbook/assets/image (161).png differ diff --git a/fr/.gitbook/assets/image (162).png b/fr/.gitbook/assets/image (162).png new file mode 100644 index 00000000..28aa9b14 Binary files /dev/null and b/fr/.gitbook/assets/image (162).png differ diff --git a/fr/.gitbook/assets/image (163).png b/fr/.gitbook/assets/image (163).png new file mode 100644 index 00000000..e2676a66 Binary files /dev/null and b/fr/.gitbook/assets/image (163).png differ diff --git a/fr/.gitbook/assets/image (164).png b/fr/.gitbook/assets/image (164).png new file mode 100644 index 00000000..24bcff61 Binary files /dev/null and b/fr/.gitbook/assets/image (164).png differ diff --git a/fr/.gitbook/assets/image (165).png b/fr/.gitbook/assets/image (165).png new file mode 100644 index 00000000..5e0d0fad Binary files /dev/null and b/fr/.gitbook/assets/image (165).png differ diff --git a/fr/.gitbook/assets/image (166).png b/fr/.gitbook/assets/image (166).png new file mode 100644 index 00000000..e5f16e8d Binary files /dev/null and b/fr/.gitbook/assets/image (166).png differ diff --git a/fr/.gitbook/assets/image (167).png b/fr/.gitbook/assets/image (167).png new file mode 100644 index 
00000000..e902eef2 Binary files /dev/null and b/fr/.gitbook/assets/image (167).png differ diff --git a/fr/.gitbook/assets/image (168).png b/fr/.gitbook/assets/image (168).png new file mode 100644 index 00000000..9d0ae755 Binary files /dev/null and b/fr/.gitbook/assets/image (168).png differ diff --git a/fr/.gitbook/assets/image (169).png b/fr/.gitbook/assets/image (169).png new file mode 100644 index 00000000..25eb81ba Binary files /dev/null and b/fr/.gitbook/assets/image (169).png differ diff --git a/fr/.gitbook/assets/image (17) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (17) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..f22be2bf Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (17) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (17) (1) (1) (1) (1).png new file mode 100644 index 00000000..7767b3bb Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (17) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (17) (1) (1) (1) (2).png new file mode 100644 index 00000000..57bcd68d Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (17) (1) (1) (1).png b/fr/.gitbook/assets/image (17) (1) (1) (1).png new file mode 100644 index 00000000..724e8e27 Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (17) (1) (1).png b/fr/.gitbook/assets/image (17) (1) (1).png new file mode 100644 index 00000000..e1c6a718 Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (17) (1) (2).png b/fr/.gitbook/assets/image (17) (1) (2).png new file mode 100644 index 00000000..ae55297e Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (17) (1).png 
b/fr/.gitbook/assets/image (17) (1).png new file mode 100644 index 00000000..f19aa369 Binary files /dev/null and b/fr/.gitbook/assets/image (17) (1).png differ diff --git a/fr/.gitbook/assets/image (17) (2).png b/fr/.gitbook/assets/image (17) (2).png new file mode 100644 index 00000000..e2d1bcc3 Binary files /dev/null and b/fr/.gitbook/assets/image (17) (2).png differ diff --git a/fr/.gitbook/assets/image (17).png b/fr/.gitbook/assets/image (17).png new file mode 100644 index 00000000..004a2268 Binary files /dev/null and b/fr/.gitbook/assets/image (17).png differ diff --git a/fr/.gitbook/assets/image (170).png b/fr/.gitbook/assets/image (170).png new file mode 100644 index 00000000..9e2c6d6b Binary files /dev/null and b/fr/.gitbook/assets/image (170).png differ diff --git a/fr/.gitbook/assets/image (171).png b/fr/.gitbook/assets/image (171).png new file mode 100644 index 00000000..18d42517 Binary files /dev/null and b/fr/.gitbook/assets/image (171).png differ diff --git a/fr/.gitbook/assets/image (172) (1).png b/fr/.gitbook/assets/image (172) (1).png new file mode 100644 index 00000000..76fc4d4f Binary files /dev/null and b/fr/.gitbook/assets/image (172) (1).png differ diff --git a/fr/.gitbook/assets/image (172).png b/fr/.gitbook/assets/image (172).png new file mode 100644 index 00000000..ee3e8d8a Binary files /dev/null and b/fr/.gitbook/assets/image (172).png differ diff --git a/fr/.gitbook/assets/image (173).png b/fr/.gitbook/assets/image (173).png new file mode 100644 index 00000000..4435db0b Binary files /dev/null and b/fr/.gitbook/assets/image (173).png differ diff --git a/fr/.gitbook/assets/image (174).png b/fr/.gitbook/assets/image (174).png new file mode 100644 index 00000000..4435db0b Binary files /dev/null and b/fr/.gitbook/assets/image (174).png differ diff --git a/fr/.gitbook/assets/image (175).png b/fr/.gitbook/assets/image (175).png new file mode 100644 index 00000000..b492743e Binary files /dev/null and b/fr/.gitbook/assets/image (175).png differ 
diff --git a/fr/.gitbook/assets/image (176).png b/fr/.gitbook/assets/image (176).png new file mode 100644 index 00000000..7d658a87 Binary files /dev/null and b/fr/.gitbook/assets/image (176).png differ diff --git a/fr/.gitbook/assets/image (177).png b/fr/.gitbook/assets/image (177).png new file mode 100644 index 00000000..e0f562cd Binary files /dev/null and b/fr/.gitbook/assets/image (177).png differ diff --git a/fr/.gitbook/assets/image (178).png b/fr/.gitbook/assets/image (178).png new file mode 100644 index 00000000..9e2d6224 Binary files /dev/null and b/fr/.gitbook/assets/image (178).png differ diff --git a/fr/.gitbook/assets/image (179).png b/fr/.gitbook/assets/image (179).png new file mode 100644 index 00000000..91e7effd Binary files /dev/null and b/fr/.gitbook/assets/image (179).png differ diff --git a/fr/.gitbook/assets/image (18) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (18) (1) (1) (1) (1).png new file mode 100644 index 00000000..a4f60172 Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (18) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (18) (1) (1) (1) (2).png new file mode 100644 index 00000000..c77749e2 Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (18) (1) (1) (1).png b/fr/.gitbook/assets/image (18) (1) (1) (1).png new file mode 100644 index 00000000..8229bc3e Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (18) (1) (1).png b/fr/.gitbook/assets/image (18) (1) (1).png new file mode 100644 index 00000000..82079491 Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (18) (1) (2) (1).png b/fr/.gitbook/assets/image (18) (1) (2) (1).png new file mode 100644 index 00000000..8e27a33f Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1) (2) (1).png differ diff 
--git a/fr/.gitbook/assets/image (18) (1) (2).png b/fr/.gitbook/assets/image (18) (1) (2).png new file mode 100644 index 00000000..1c22f80f Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (18) (1).png b/fr/.gitbook/assets/image (18) (1).png new file mode 100644 index 00000000..fc75b76c Binary files /dev/null and b/fr/.gitbook/assets/image (18) (1).png differ diff --git a/fr/.gitbook/assets/image (18) (2).png b/fr/.gitbook/assets/image (18) (2).png new file mode 100644 index 00000000..0977fe86 Binary files /dev/null and b/fr/.gitbook/assets/image (18) (2).png differ diff --git a/fr/.gitbook/assets/image (18).png b/fr/.gitbook/assets/image (18).png new file mode 100644 index 00000000..f651dbef Binary files /dev/null and b/fr/.gitbook/assets/image (18).png differ diff --git a/fr/.gitbook/assets/image (180).png b/fr/.gitbook/assets/image (180).png new file mode 100644 index 00000000..f655c13d Binary files /dev/null and b/fr/.gitbook/assets/image (180).png differ diff --git a/fr/.gitbook/assets/image (181).png b/fr/.gitbook/assets/image (181).png new file mode 100644 index 00000000..29e128a4 Binary files /dev/null and b/fr/.gitbook/assets/image (181).png differ diff --git a/fr/.gitbook/assets/image (182).png b/fr/.gitbook/assets/image (182).png new file mode 100644 index 00000000..9f1f8782 Binary files /dev/null and b/fr/.gitbook/assets/image (182).png differ diff --git a/fr/.gitbook/assets/image (183).png b/fr/.gitbook/assets/image (183).png new file mode 100644 index 00000000..8bccc4a3 Binary files /dev/null and b/fr/.gitbook/assets/image (183).png differ diff --git a/fr/.gitbook/assets/image (184).png b/fr/.gitbook/assets/image (184).png new file mode 100644 index 00000000..bb509fdb Binary files /dev/null and b/fr/.gitbook/assets/image (184).png differ diff --git a/fr/.gitbook/assets/image (185).png b/fr/.gitbook/assets/image (185).png new file mode 100644 index 00000000..3204952c Binary files 
/dev/null and b/fr/.gitbook/assets/image (185).png differ diff --git a/fr/.gitbook/assets/image (186).png b/fr/.gitbook/assets/image (186).png new file mode 100644 index 00000000..23c9f1ae Binary files /dev/null and b/fr/.gitbook/assets/image (186).png differ diff --git a/fr/.gitbook/assets/image (187).png b/fr/.gitbook/assets/image (187).png new file mode 100644 index 00000000..bbd72231 Binary files /dev/null and b/fr/.gitbook/assets/image (187).png differ diff --git a/fr/.gitbook/assets/image (188).png b/fr/.gitbook/assets/image (188).png new file mode 100644 index 00000000..0305d3f3 Binary files /dev/null and b/fr/.gitbook/assets/image (188).png differ diff --git a/fr/.gitbook/assets/image (189).png b/fr/.gitbook/assets/image (189).png new file mode 100644 index 00000000..73e50d18 Binary files /dev/null and b/fr/.gitbook/assets/image (189).png differ diff --git a/fr/.gitbook/assets/image (19) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (19) (1) (1) (1) (1).png new file mode 100644 index 00000000..80cd68d8 Binary files /dev/null and b/fr/.gitbook/assets/image (19) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (19) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (19) (1) (1) (1) (2).png new file mode 100644 index 00000000..fdb3193e Binary files /dev/null and b/fr/.gitbook/assets/image (19) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (19) (1) (1) (1).png b/fr/.gitbook/assets/image (19) (1) (1) (1).png new file mode 100644 index 00000000..63b3b7ce Binary files /dev/null and b/fr/.gitbook/assets/image (19) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (19) (1) (1).png b/fr/.gitbook/assets/image (19) (1) (1).png new file mode 100644 index 00000000..55a6b650 Binary files /dev/null and b/fr/.gitbook/assets/image (19) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (19) (1).png b/fr/.gitbook/assets/image (19) (1).png new file mode 100644 index 00000000..fd7c9433 Binary files /dev/null and 
b/fr/.gitbook/assets/image (19) (1).png differ diff --git a/fr/.gitbook/assets/image (19) (2).png b/fr/.gitbook/assets/image (19) (2).png new file mode 100644 index 00000000..8bf627c5 Binary files /dev/null and b/fr/.gitbook/assets/image (19) (2).png differ diff --git a/fr/.gitbook/assets/image (19).png b/fr/.gitbook/assets/image (19).png new file mode 100644 index 00000000..af634c25 Binary files /dev/null and b/fr/.gitbook/assets/image (19).png differ diff --git a/fr/.gitbook/assets/image (190).png b/fr/.gitbook/assets/image (190).png new file mode 100644 index 00000000..a580944b Binary files /dev/null and b/fr/.gitbook/assets/image (190).png differ diff --git a/fr/.gitbook/assets/image (191).png b/fr/.gitbook/assets/image (191).png new file mode 100644 index 00000000..6b36c1cb Binary files /dev/null and b/fr/.gitbook/assets/image (191).png differ diff --git a/fr/.gitbook/assets/image (192).png b/fr/.gitbook/assets/image (192).png new file mode 100644 index 00000000..cf58731b Binary files /dev/null and b/fr/.gitbook/assets/image (192).png differ diff --git a/fr/.gitbook/assets/image (193).png b/fr/.gitbook/assets/image (193).png new file mode 100644 index 00000000..17700181 Binary files /dev/null and b/fr/.gitbook/assets/image (193).png differ diff --git a/fr/.gitbook/assets/image (194).png b/fr/.gitbook/assets/image (194).png new file mode 100644 index 00000000..e3dbe87b Binary files /dev/null and b/fr/.gitbook/assets/image (194).png differ diff --git a/fr/.gitbook/assets/image (195).png b/fr/.gitbook/assets/image (195).png new file mode 100644 index 00000000..ff22a4be Binary files /dev/null and b/fr/.gitbook/assets/image (195).png differ diff --git a/fr/.gitbook/assets/image (196).png b/fr/.gitbook/assets/image (196).png new file mode 100644 index 00000000..c0f67f2e Binary files /dev/null and b/fr/.gitbook/assets/image (196).png differ diff --git a/fr/.gitbook/assets/image (197).png b/fr/.gitbook/assets/image (197).png new file mode 100644 index 
00000000..a4084d45 Binary files /dev/null and b/fr/.gitbook/assets/image (197).png differ diff --git a/fr/.gitbook/assets/image (198).png b/fr/.gitbook/assets/image (198).png new file mode 100644 index 00000000..4e861530 Binary files /dev/null and b/fr/.gitbook/assets/image (198).png differ diff --git a/fr/.gitbook/assets/image (199).png b/fr/.gitbook/assets/image (199).png new file mode 100644 index 00000000..c1728326 Binary files /dev/null and b/fr/.gitbook/assets/image (199).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..bc895c9b Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..3895b165 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..6d23847f Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..e36552aa Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..55272b56 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..66551af4 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..0dd60a40 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ec7296d3 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..f85ddd58 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..83628ae5 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..e1058db8 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..11f35920 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..97b8a02f Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..5530c9d7 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git 
a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..2969ad4e Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..3c616586 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..f8a9cf1e Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..08fb33e7 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..9e01b57b Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..585285ee Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c7a0b62d Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..1abc8106 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..900cafe3 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..5f379772 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..7960c0e8 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..f7164c75 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) 
(1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c7d0f855 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..2a0c7459 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..50cb5469 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..0a3b7402 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c12e3060 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..c3877ec5 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..d136f640 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1) (1).png new file mode 100644 index 00000000..0f1f566f Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1) 
(1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1) (1).png new file mode 100644 index 00000000..d9ce2e62 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (1) (2).png new file mode 100644 index 00000000..5e09d784 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (1).png b/fr/.gitbook/assets/image (2) (1) (1).png new file mode 100644 index 00000000..f464802c Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (2) (1).png b/fr/.gitbook/assets/image (2) (1) (2) (1).png new file mode 100644 index 00000000..29ffc492 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (1) (2).png b/fr/.gitbook/assets/image (2) (1) (2).png new file mode 100644 index 00000000..2316cf50 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (1).png b/fr/.gitbook/assets/image (2) (1).png new file mode 100644 index 00000000..6db61d0e Binary files /dev/null and b/fr/.gitbook/assets/image (2) (1).png differ diff --git a/fr/.gitbook/assets/image (2) (2).png b/fr/.gitbook/assets/image (2) (2).png new file mode 100644 index 00000000..bc3c8655 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (2).png differ diff --git a/fr/.gitbook/assets/image (2) (3).png b/fr/.gitbook/assets/image (2) (3).png new file mode 100644 index 00000000..c8e34502 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (3).png differ diff --git a/fr/.gitbook/assets/image (2) (4) (1).png b/fr/.gitbook/assets/image (2) (4) (1).png new file mode 100644 index 00000000..66534d13 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (4) (1).png differ diff 
--git a/fr/.gitbook/assets/image (2) (4).png b/fr/.gitbook/assets/image (2) (4).png new file mode 100644 index 00000000..aee18b7b Binary files /dev/null and b/fr/.gitbook/assets/image (2) (4).png differ diff --git a/fr/.gitbook/assets/image (2) (5).png b/fr/.gitbook/assets/image (2) (5).png new file mode 100644 index 00000000..5eb16098 Binary files /dev/null and b/fr/.gitbook/assets/image (2) (5).png differ diff --git a/fr/.gitbook/assets/image (2) (6).png b/fr/.gitbook/assets/image (2) (6).png new file mode 100644 index 00000000..4926de0a Binary files /dev/null and b/fr/.gitbook/assets/image (2) (6).png differ diff --git a/fr/.gitbook/assets/image (2).png b/fr/.gitbook/assets/image (2).png new file mode 100644 index 00000000..02b1c802 Binary files /dev/null and b/fr/.gitbook/assets/image (2).png differ diff --git a/fr/.gitbook/assets/image (20) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (20) (1) (1) (1) (1).png new file mode 100644 index 00000000..05704748 Binary files /dev/null and b/fr/.gitbook/assets/image (20) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (20) (1) (1) (1).png b/fr/.gitbook/assets/image (20) (1) (1) (1).png new file mode 100644 index 00000000..b732ea1e Binary files /dev/null and b/fr/.gitbook/assets/image (20) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (20) (1) (1).png b/fr/.gitbook/assets/image (20) (1) (1).png new file mode 100644 index 00000000..3d3bcc9b Binary files /dev/null and b/fr/.gitbook/assets/image (20) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (20) (1).png b/fr/.gitbook/assets/image (20) (1).png new file mode 100644 index 00000000..3a52e8b9 Binary files /dev/null and b/fr/.gitbook/assets/image (20) (1).png differ diff --git a/fr/.gitbook/assets/image (20) (2).png b/fr/.gitbook/assets/image (20) (2).png new file mode 100644 index 00000000..24a24cfa Binary files /dev/null and b/fr/.gitbook/assets/image (20) (2).png differ diff --git a/fr/.gitbook/assets/image (20).png 
b/fr/.gitbook/assets/image (20).png new file mode 100644 index 00000000..c851f1ad Binary files /dev/null and b/fr/.gitbook/assets/image (20).png differ diff --git a/fr/.gitbook/assets/image (200).png b/fr/.gitbook/assets/image (200).png new file mode 100644 index 00000000..762763d0 Binary files /dev/null and b/fr/.gitbook/assets/image (200).png differ diff --git a/fr/.gitbook/assets/image (201).png b/fr/.gitbook/assets/image (201).png new file mode 100644 index 00000000..b04ffcda Binary files /dev/null and b/fr/.gitbook/assets/image (201).png differ diff --git a/fr/.gitbook/assets/image (202).png b/fr/.gitbook/assets/image (202).png new file mode 100644 index 00000000..c1aa9905 Binary files /dev/null and b/fr/.gitbook/assets/image (202).png differ diff --git a/fr/.gitbook/assets/image (203).png b/fr/.gitbook/assets/image (203).png new file mode 100644 index 00000000..0fab1bd9 Binary files /dev/null and b/fr/.gitbook/assets/image (203).png differ diff --git a/fr/.gitbook/assets/image (204).png b/fr/.gitbook/assets/image (204).png new file mode 100644 index 00000000..416758cb Binary files /dev/null and b/fr/.gitbook/assets/image (204).png differ diff --git a/fr/.gitbook/assets/image (205).png b/fr/.gitbook/assets/image (205).png new file mode 100644 index 00000000..1c557770 Binary files /dev/null and b/fr/.gitbook/assets/image (205).png differ diff --git a/fr/.gitbook/assets/image (206).png b/fr/.gitbook/assets/image (206).png new file mode 100644 index 00000000..16884367 Binary files /dev/null and b/fr/.gitbook/assets/image (206).png differ diff --git a/fr/.gitbook/assets/image (207).png b/fr/.gitbook/assets/image (207).png new file mode 100644 index 00000000..784971af Binary files /dev/null and b/fr/.gitbook/assets/image (207).png differ diff --git a/fr/.gitbook/assets/image (208).png b/fr/.gitbook/assets/image (208).png new file mode 100644 index 00000000..ccfd24a6 Binary files /dev/null and b/fr/.gitbook/assets/image (208).png differ diff --git 
a/fr/.gitbook/assets/image (209).png b/fr/.gitbook/assets/image (209).png new file mode 100644 index 00000000..f6897b2f Binary files /dev/null and b/fr/.gitbook/assets/image (209).png differ diff --git a/fr/.gitbook/assets/image (21) (1) (1) (1).png b/fr/.gitbook/assets/image (21) (1) (1) (1).png new file mode 100644 index 00000000..18bdf929 Binary files /dev/null and b/fr/.gitbook/assets/image (21) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (21) (1) (1).png b/fr/.gitbook/assets/image (21) (1) (1).png new file mode 100644 index 00000000..44d1dc48 Binary files /dev/null and b/fr/.gitbook/assets/image (21) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (21) (1) (2).png b/fr/.gitbook/assets/image (21) (1) (2).png new file mode 100644 index 00000000..e06028b3 Binary files /dev/null and b/fr/.gitbook/assets/image (21) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (21) (1).png b/fr/.gitbook/assets/image (21) (1).png new file mode 100644 index 00000000..7b874167 Binary files /dev/null and b/fr/.gitbook/assets/image (21) (1).png differ diff --git a/fr/.gitbook/assets/image (21).png b/fr/.gitbook/assets/image (21).png new file mode 100644 index 00000000..f432e251 Binary files /dev/null and b/fr/.gitbook/assets/image (21).png differ diff --git a/fr/.gitbook/assets/image (210).png b/fr/.gitbook/assets/image (210).png new file mode 100644 index 00000000..c054a4bb Binary files /dev/null and b/fr/.gitbook/assets/image (210).png differ diff --git a/fr/.gitbook/assets/image (211).png b/fr/.gitbook/assets/image (211).png new file mode 100644 index 00000000..f5256bad Binary files /dev/null and b/fr/.gitbook/assets/image (211).png differ diff --git a/fr/.gitbook/assets/image (212).png b/fr/.gitbook/assets/image (212).png new file mode 100644 index 00000000..38cb61f3 Binary files /dev/null and b/fr/.gitbook/assets/image (212).png differ diff --git a/fr/.gitbook/assets/image (213).png b/fr/.gitbook/assets/image (213).png new file mode 100644 index 
00000000..e4313c54 Binary files /dev/null and b/fr/.gitbook/assets/image (213).png differ diff --git a/fr/.gitbook/assets/image (214).png b/fr/.gitbook/assets/image (214).png new file mode 100644 index 00000000..228c7bf1 Binary files /dev/null and b/fr/.gitbook/assets/image (214).png differ diff --git a/fr/.gitbook/assets/image (215).png b/fr/.gitbook/assets/image (215).png new file mode 100644 index 00000000..0a36c645 Binary files /dev/null and b/fr/.gitbook/assets/image (215).png differ diff --git a/fr/.gitbook/assets/image (216).png b/fr/.gitbook/assets/image (216).png new file mode 100644 index 00000000..126b0001 Binary files /dev/null and b/fr/.gitbook/assets/image (216).png differ diff --git a/fr/.gitbook/assets/image (217).png b/fr/.gitbook/assets/image (217).png new file mode 100644 index 00000000..ce41b0b2 Binary files /dev/null and b/fr/.gitbook/assets/image (217).png differ diff --git a/fr/.gitbook/assets/image (218).png b/fr/.gitbook/assets/image (218).png new file mode 100644 index 00000000..e1b50c9e Binary files /dev/null and b/fr/.gitbook/assets/image (218).png differ diff --git a/fr/.gitbook/assets/image (219).png b/fr/.gitbook/assets/image (219).png new file mode 100644 index 00000000..fd886cce Binary files /dev/null and b/fr/.gitbook/assets/image (219).png differ diff --git a/fr/.gitbook/assets/image (22) (1) (1).png b/fr/.gitbook/assets/image (22) (1) (1).png new file mode 100644 index 00000000..b56c5e31 Binary files /dev/null and b/fr/.gitbook/assets/image (22) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (22) (1).png b/fr/.gitbook/assets/image (22) (1).png new file mode 100644 index 00000000..bde4e6cf Binary files /dev/null and b/fr/.gitbook/assets/image (22) (1).png differ diff --git a/fr/.gitbook/assets/image (22).png b/fr/.gitbook/assets/image (22).png new file mode 100644 index 00000000..d90a3ab5 Binary files /dev/null and b/fr/.gitbook/assets/image (22).png differ diff --git a/fr/.gitbook/assets/image (220).png 
b/fr/.gitbook/assets/image (220).png new file mode 100644 index 00000000..b3f5555a Binary files /dev/null and b/fr/.gitbook/assets/image (220).png differ diff --git a/fr/.gitbook/assets/image (221).png b/fr/.gitbook/assets/image (221).png new file mode 100644 index 00000000..ebf9e42a Binary files /dev/null and b/fr/.gitbook/assets/image (221).png differ diff --git a/fr/.gitbook/assets/image (222).png b/fr/.gitbook/assets/image (222).png new file mode 100644 index 00000000..dd6b84dd Binary files /dev/null and b/fr/.gitbook/assets/image (222).png differ diff --git a/fr/.gitbook/assets/image (223).png b/fr/.gitbook/assets/image (223).png new file mode 100644 index 00000000..eed24cda Binary files /dev/null and b/fr/.gitbook/assets/image (223).png differ diff --git a/fr/.gitbook/assets/image (224).png b/fr/.gitbook/assets/image (224).png new file mode 100644 index 00000000..8e71bbe4 Binary files /dev/null and b/fr/.gitbook/assets/image (224).png differ diff --git a/fr/.gitbook/assets/image (225).png b/fr/.gitbook/assets/image (225).png new file mode 100644 index 00000000..ea423fd4 Binary files /dev/null and b/fr/.gitbook/assets/image (225).png differ diff --git a/fr/.gitbook/assets/image (226).png b/fr/.gitbook/assets/image (226).png new file mode 100644 index 00000000..4ccae983 Binary files /dev/null and b/fr/.gitbook/assets/image (226).png differ diff --git a/fr/.gitbook/assets/image (227).png b/fr/.gitbook/assets/image (227).png new file mode 100644 index 00000000..15e7a591 Binary files /dev/null and b/fr/.gitbook/assets/image (227).png differ diff --git a/fr/.gitbook/assets/image (228).png b/fr/.gitbook/assets/image (228).png new file mode 100644 index 00000000..657b4bd8 Binary files /dev/null and b/fr/.gitbook/assets/image (228).png differ diff --git a/fr/.gitbook/assets/image (229).png b/fr/.gitbook/assets/image (229).png new file mode 100644 index 00000000..3edebcc0 Binary files /dev/null and b/fr/.gitbook/assets/image (229).png differ diff --git 
a/fr/.gitbook/assets/image (23) (1) (1).png b/fr/.gitbook/assets/image (23) (1) (1).png new file mode 100644 index 00000000..a179f99f Binary files /dev/null and b/fr/.gitbook/assets/image (23) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (23) (1) (2).png b/fr/.gitbook/assets/image (23) (1) (2).png new file mode 100644 index 00000000..0cc470ce Binary files /dev/null and b/fr/.gitbook/assets/image (23) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (23) (1).png b/fr/.gitbook/assets/image (23) (1).png new file mode 100644 index 00000000..851200ea Binary files /dev/null and b/fr/.gitbook/assets/image (23) (1).png differ diff --git a/fr/.gitbook/assets/image (23).png b/fr/.gitbook/assets/image (23).png new file mode 100644 index 00000000..cc800c1f Binary files /dev/null and b/fr/.gitbook/assets/image (23).png differ diff --git a/fr/.gitbook/assets/image (230).png b/fr/.gitbook/assets/image (230).png new file mode 100644 index 00000000..96cc56fe Binary files /dev/null and b/fr/.gitbook/assets/image (230).png differ diff --git a/fr/.gitbook/assets/image (231).png b/fr/.gitbook/assets/image (231).png new file mode 100644 index 00000000..b056dde9 Binary files /dev/null and b/fr/.gitbook/assets/image (231).png differ diff --git a/fr/.gitbook/assets/image (232).png b/fr/.gitbook/assets/image (232).png new file mode 100644 index 00000000..0e6572e6 Binary files /dev/null and b/fr/.gitbook/assets/image (232).png differ diff --git a/fr/.gitbook/assets/image (233).png b/fr/.gitbook/assets/image (233).png new file mode 100644 index 00000000..d8bf5e63 Binary files /dev/null and b/fr/.gitbook/assets/image (233).png differ diff --git a/fr/.gitbook/assets/image (234).png b/fr/.gitbook/assets/image (234).png new file mode 100644 index 00000000..dd43b43c Binary files /dev/null and b/fr/.gitbook/assets/image (234).png differ diff --git a/fr/.gitbook/assets/image (235).png b/fr/.gitbook/assets/image (235).png new file mode 100644 index 00000000..1502ecba Binary files 
/dev/null and b/fr/.gitbook/assets/image (235).png differ diff --git a/fr/.gitbook/assets/image (236).png b/fr/.gitbook/assets/image (236).png new file mode 100644 index 00000000..aeb202a5 Binary files /dev/null and b/fr/.gitbook/assets/image (236).png differ diff --git a/fr/.gitbook/assets/image (237).png b/fr/.gitbook/assets/image (237).png new file mode 100644 index 00000000..b8f1612c Binary files /dev/null and b/fr/.gitbook/assets/image (237).png differ diff --git a/fr/.gitbook/assets/image (238).png b/fr/.gitbook/assets/image (238).png new file mode 100644 index 00000000..5fe767ad Binary files /dev/null and b/fr/.gitbook/assets/image (238).png differ diff --git a/fr/.gitbook/assets/image (239).png b/fr/.gitbook/assets/image (239).png new file mode 100644 index 00000000..d0c08c85 Binary files /dev/null and b/fr/.gitbook/assets/image (239).png differ diff --git a/fr/.gitbook/assets/image (24) (1).png b/fr/.gitbook/assets/image (24) (1).png new file mode 100644 index 00000000..b129842c Binary files /dev/null and b/fr/.gitbook/assets/image (24) (1).png differ diff --git a/fr/.gitbook/assets/image (24).png b/fr/.gitbook/assets/image (24).png new file mode 100644 index 00000000..82413b59 Binary files /dev/null and b/fr/.gitbook/assets/image (24).png differ diff --git a/fr/.gitbook/assets/image (240).png b/fr/.gitbook/assets/image (240).png new file mode 100644 index 00000000..f2e87855 Binary files /dev/null and b/fr/.gitbook/assets/image (240).png differ diff --git a/fr/.gitbook/assets/image (241).png b/fr/.gitbook/assets/image (241).png new file mode 100644 index 00000000..420c6804 Binary files /dev/null and b/fr/.gitbook/assets/image (241).png differ diff --git a/fr/.gitbook/assets/image (242).png b/fr/.gitbook/assets/image (242).png new file mode 100644 index 00000000..f055ff30 Binary files /dev/null and b/fr/.gitbook/assets/image (242).png differ diff --git a/fr/.gitbook/assets/image (243).png b/fr/.gitbook/assets/image (243).png new file mode 100644 index 
00000000..fea1ce7b Binary files /dev/null and b/fr/.gitbook/assets/image (243).png differ diff --git a/fr/.gitbook/assets/image (244).png b/fr/.gitbook/assets/image (244).png new file mode 100644 index 00000000..b1e7497d Binary files /dev/null and b/fr/.gitbook/assets/image (244).png differ diff --git a/fr/.gitbook/assets/image (245).png b/fr/.gitbook/assets/image (245).png new file mode 100644 index 00000000..bfbe6579 Binary files /dev/null and b/fr/.gitbook/assets/image (245).png differ diff --git a/fr/.gitbook/assets/image (246).png b/fr/.gitbook/assets/image (246).png new file mode 100644 index 00000000..6e3fefb4 Binary files /dev/null and b/fr/.gitbook/assets/image (246).png differ diff --git a/fr/.gitbook/assets/image (247).png b/fr/.gitbook/assets/image (247).png new file mode 100644 index 00000000..94f1ca24 Binary files /dev/null and b/fr/.gitbook/assets/image (247).png differ diff --git a/fr/.gitbook/assets/image (248).png b/fr/.gitbook/assets/image (248).png new file mode 100644 index 00000000..5a2e9cf1 Binary files /dev/null and b/fr/.gitbook/assets/image (248).png differ diff --git a/fr/.gitbook/assets/image (249).png b/fr/.gitbook/assets/image (249).png new file mode 100644 index 00000000..fdfdba23 Binary files /dev/null and b/fr/.gitbook/assets/image (249).png differ diff --git a/fr/.gitbook/assets/image (25) (1) (1).png b/fr/.gitbook/assets/image (25) (1) (1).png new file mode 100644 index 00000000..e2b67a32 Binary files /dev/null and b/fr/.gitbook/assets/image (25) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (25) (1).png b/fr/.gitbook/assets/image (25) (1).png new file mode 100644 index 00000000..6e990403 Binary files /dev/null and b/fr/.gitbook/assets/image (25) (1).png differ diff --git a/fr/.gitbook/assets/image (25).png b/fr/.gitbook/assets/image (25).png new file mode 100644 index 00000000..51761efd Binary files /dev/null and b/fr/.gitbook/assets/image (25).png differ diff --git a/fr/.gitbook/assets/image (250).png 
b/fr/.gitbook/assets/image (250).png new file mode 100644 index 00000000..7f62e6b4 Binary files /dev/null and b/fr/.gitbook/assets/image (250).png differ diff --git a/fr/.gitbook/assets/image (251).png b/fr/.gitbook/assets/image (251).png new file mode 100644 index 00000000..4bb42530 Binary files /dev/null and b/fr/.gitbook/assets/image (251).png differ diff --git a/fr/.gitbook/assets/image (252).png b/fr/.gitbook/assets/image (252).png new file mode 100644 index 00000000..72e5341b Binary files /dev/null and b/fr/.gitbook/assets/image (252).png differ diff --git a/fr/.gitbook/assets/image (253).png b/fr/.gitbook/assets/image (253).png new file mode 100644 index 00000000..8f7a1f6d Binary files /dev/null and b/fr/.gitbook/assets/image (253).png differ diff --git a/fr/.gitbook/assets/image (254).png b/fr/.gitbook/assets/image (254).png new file mode 100644 index 00000000..45b6f49f Binary files /dev/null and b/fr/.gitbook/assets/image (254).png differ diff --git a/fr/.gitbook/assets/image (255).png b/fr/.gitbook/assets/image (255).png new file mode 100644 index 00000000..3fc2f81a Binary files /dev/null and b/fr/.gitbook/assets/image (255).png differ diff --git a/fr/.gitbook/assets/image (256).png b/fr/.gitbook/assets/image (256).png new file mode 100644 index 00000000..8dc2b06f Binary files /dev/null and b/fr/.gitbook/assets/image (256).png differ diff --git a/fr/.gitbook/assets/image (257).png b/fr/.gitbook/assets/image (257).png new file mode 100644 index 00000000..76a158f5 Binary files /dev/null and b/fr/.gitbook/assets/image (257).png differ diff --git a/fr/.gitbook/assets/image (258).png b/fr/.gitbook/assets/image (258).png new file mode 100644 index 00000000..85b49271 Binary files /dev/null and b/fr/.gitbook/assets/image (258).png differ diff --git a/fr/.gitbook/assets/image (259).png b/fr/.gitbook/assets/image (259).png new file mode 100644 index 00000000..ba6235fd Binary files /dev/null and b/fr/.gitbook/assets/image (259).png differ diff --git 
a/fr/.gitbook/assets/image (26).png b/fr/.gitbook/assets/image (26).png new file mode 100644 index 00000000..6c72c3cb Binary files /dev/null and b/fr/.gitbook/assets/image (26).png differ diff --git a/fr/.gitbook/assets/image (260).png b/fr/.gitbook/assets/image (260).png new file mode 100644 index 00000000..680bf32f Binary files /dev/null and b/fr/.gitbook/assets/image (260).png differ diff --git a/fr/.gitbook/assets/image (261).png b/fr/.gitbook/assets/image (261).png new file mode 100644 index 00000000..482ff7bd Binary files /dev/null and b/fr/.gitbook/assets/image (261).png differ diff --git a/fr/.gitbook/assets/image (262).png b/fr/.gitbook/assets/image (262).png new file mode 100644 index 00000000..d6b00970 Binary files /dev/null and b/fr/.gitbook/assets/image (262).png differ diff --git a/fr/.gitbook/assets/image (263).png b/fr/.gitbook/assets/image (263).png new file mode 100644 index 00000000..75e8c17e Binary files /dev/null and b/fr/.gitbook/assets/image (263).png differ diff --git a/fr/.gitbook/assets/image (264).png b/fr/.gitbook/assets/image (264).png new file mode 100644 index 00000000..a9c5cbe4 Binary files /dev/null and b/fr/.gitbook/assets/image (264).png differ diff --git a/fr/.gitbook/assets/image (265).png b/fr/.gitbook/assets/image (265).png new file mode 100644 index 00000000..1b58bf4c Binary files /dev/null and b/fr/.gitbook/assets/image (265).png differ diff --git a/fr/.gitbook/assets/image (266).png b/fr/.gitbook/assets/image (266).png new file mode 100644 index 00000000..08a94eb8 Binary files /dev/null and b/fr/.gitbook/assets/image (266).png differ diff --git a/fr/.gitbook/assets/image (267).png b/fr/.gitbook/assets/image (267).png new file mode 100644 index 00000000..34f553e7 Binary files /dev/null and b/fr/.gitbook/assets/image (267).png differ diff --git a/fr/.gitbook/assets/image (268).png b/fr/.gitbook/assets/image (268).png new file mode 100644 index 00000000..e00d55b0 Binary files /dev/null and b/fr/.gitbook/assets/image (268).png 
differ diff --git a/fr/.gitbook/assets/image (269).png b/fr/.gitbook/assets/image (269).png new file mode 100644 index 00000000..b11c5219 Binary files /dev/null and b/fr/.gitbook/assets/image (269).png differ diff --git a/fr/.gitbook/assets/image (27) (1) (1).png b/fr/.gitbook/assets/image (27) (1) (1).png new file mode 100644 index 00000000..88bca02c Binary files /dev/null and b/fr/.gitbook/assets/image (27) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (27) (1).png b/fr/.gitbook/assets/image (27) (1).png new file mode 100644 index 00000000..117fabc6 Binary files /dev/null and b/fr/.gitbook/assets/image (27) (1).png differ diff --git a/fr/.gitbook/assets/image (27).png b/fr/.gitbook/assets/image (27).png new file mode 100644 index 00000000..26be8099 Binary files /dev/null and b/fr/.gitbook/assets/image (27).png differ diff --git a/fr/.gitbook/assets/image (270).png b/fr/.gitbook/assets/image (270).png new file mode 100644 index 00000000..879e7e4d Binary files /dev/null and b/fr/.gitbook/assets/image (270).png differ diff --git a/fr/.gitbook/assets/image (271).png b/fr/.gitbook/assets/image (271).png new file mode 100644 index 00000000..d0b63f5d Binary files /dev/null and b/fr/.gitbook/assets/image (271).png differ diff --git a/fr/.gitbook/assets/image (272).png b/fr/.gitbook/assets/image (272).png new file mode 100644 index 00000000..adc51147 Binary files /dev/null and b/fr/.gitbook/assets/image (272).png differ diff --git a/fr/.gitbook/assets/image (273).png b/fr/.gitbook/assets/image (273).png new file mode 100644 index 00000000..1153171c Binary files /dev/null and b/fr/.gitbook/assets/image (273).png differ diff --git a/fr/.gitbook/assets/image (274).png b/fr/.gitbook/assets/image (274).png new file mode 100644 index 00000000..34a15295 Binary files /dev/null and b/fr/.gitbook/assets/image (274).png differ diff --git a/fr/.gitbook/assets/image (275).png b/fr/.gitbook/assets/image (275).png new file mode 100644 index 00000000..8701f663 Binary files 
/dev/null and b/fr/.gitbook/assets/image (275).png differ diff --git a/fr/.gitbook/assets/image (276).png b/fr/.gitbook/assets/image (276).png new file mode 100644 index 00000000..d6892a94 Binary files /dev/null and b/fr/.gitbook/assets/image (276).png differ diff --git a/fr/.gitbook/assets/image (277).png b/fr/.gitbook/assets/image (277).png new file mode 100644 index 00000000..eb998173 Binary files /dev/null and b/fr/.gitbook/assets/image (277).png differ diff --git a/fr/.gitbook/assets/image (278).png b/fr/.gitbook/assets/image (278).png new file mode 100644 index 00000000..26353df7 Binary files /dev/null and b/fr/.gitbook/assets/image (278).png differ diff --git a/fr/.gitbook/assets/image (279).png b/fr/.gitbook/assets/image (279).png new file mode 100644 index 00000000..ca6650cf Binary files /dev/null and b/fr/.gitbook/assets/image (279).png differ diff --git a/fr/.gitbook/assets/image (28).png b/fr/.gitbook/assets/image (28).png new file mode 100644 index 00000000..8e11f425 Binary files /dev/null and b/fr/.gitbook/assets/image (28).png differ diff --git a/fr/.gitbook/assets/image (280).png b/fr/.gitbook/assets/image (280).png new file mode 100644 index 00000000..58eebcb2 Binary files /dev/null and b/fr/.gitbook/assets/image (280).png differ diff --git a/fr/.gitbook/assets/image (281).png b/fr/.gitbook/assets/image (281).png new file mode 100644 index 00000000..79e81472 Binary files /dev/null and b/fr/.gitbook/assets/image (281).png differ diff --git a/fr/.gitbook/assets/image (282).png b/fr/.gitbook/assets/image (282).png new file mode 100644 index 00000000..de7b3f14 Binary files /dev/null and b/fr/.gitbook/assets/image (282).png differ diff --git a/fr/.gitbook/assets/image (283).png b/fr/.gitbook/assets/image (283).png new file mode 100644 index 00000000..725b8abc Binary files /dev/null and b/fr/.gitbook/assets/image (283).png differ diff --git a/fr/.gitbook/assets/image (284).png b/fr/.gitbook/assets/image (284).png new file mode 100644 index 
00000000..1ee68cae Binary files /dev/null and b/fr/.gitbook/assets/image (284).png differ diff --git a/fr/.gitbook/assets/image (285).png b/fr/.gitbook/assets/image (285).png new file mode 100644 index 00000000..0b4d2832 Binary files /dev/null and b/fr/.gitbook/assets/image (285).png differ diff --git a/fr/.gitbook/assets/image (286).png b/fr/.gitbook/assets/image (286).png new file mode 100644 index 00000000..2e24b6ef Binary files /dev/null and b/fr/.gitbook/assets/image (286).png differ diff --git a/fr/.gitbook/assets/image (287).png b/fr/.gitbook/assets/image (287).png new file mode 100644 index 00000000..9ed28b43 Binary files /dev/null and b/fr/.gitbook/assets/image (287).png differ diff --git a/fr/.gitbook/assets/image (288).png b/fr/.gitbook/assets/image (288).png new file mode 100644 index 00000000..b44a6999 Binary files /dev/null and b/fr/.gitbook/assets/image (288).png differ diff --git a/fr/.gitbook/assets/image (289).png b/fr/.gitbook/assets/image (289).png new file mode 100644 index 00000000..3145a05e Binary files /dev/null and b/fr/.gitbook/assets/image (289).png differ diff --git a/fr/.gitbook/assets/image (29) (1).png b/fr/.gitbook/assets/image (29) (1).png new file mode 100644 index 00000000..6fca6bb5 Binary files /dev/null and b/fr/.gitbook/assets/image (29) (1).png differ diff --git a/fr/.gitbook/assets/image (29).png b/fr/.gitbook/assets/image (29).png new file mode 100644 index 00000000..9e7ddff4 Binary files /dev/null and b/fr/.gitbook/assets/image (29).png differ diff --git a/fr/.gitbook/assets/image (290).png b/fr/.gitbook/assets/image (290).png new file mode 100644 index 00000000..c64d812e Binary files /dev/null and b/fr/.gitbook/assets/image (290).png differ diff --git a/fr/.gitbook/assets/image (291).png b/fr/.gitbook/assets/image (291).png new file mode 100644 index 00000000..a5172bac Binary files /dev/null and b/fr/.gitbook/assets/image (291).png differ diff --git a/fr/.gitbook/assets/image (292).png b/fr/.gitbook/assets/image (292).png 
new file mode 100644 index 00000000..de66549b Binary files /dev/null and b/fr/.gitbook/assets/image (292).png differ diff --git a/fr/.gitbook/assets/image (293).png b/fr/.gitbook/assets/image (293).png new file mode 100644 index 00000000..3e05a94b Binary files /dev/null and b/fr/.gitbook/assets/image (293).png differ diff --git a/fr/.gitbook/assets/image (294).png b/fr/.gitbook/assets/image (294).png new file mode 100644 index 00000000..b3c75245 Binary files /dev/null and b/fr/.gitbook/assets/image (294).png differ diff --git a/fr/.gitbook/assets/image (295).png b/fr/.gitbook/assets/image (295).png new file mode 100644 index 00000000..bd40d2ea Binary files /dev/null and b/fr/.gitbook/assets/image (295).png differ diff --git a/fr/.gitbook/assets/image (296).png b/fr/.gitbook/assets/image (296).png new file mode 100644 index 00000000..feb1e71a Binary files /dev/null and b/fr/.gitbook/assets/image (296).png differ diff --git a/fr/.gitbook/assets/image (297).png b/fr/.gitbook/assets/image (297).png new file mode 100644 index 00000000..f02ffafa Binary files /dev/null and b/fr/.gitbook/assets/image (297).png differ diff --git a/fr/.gitbook/assets/image (298).png b/fr/.gitbook/assets/image (298).png new file mode 100644 index 00000000..3b4df9ad Binary files /dev/null and b/fr/.gitbook/assets/image (298).png differ diff --git a/fr/.gitbook/assets/image (299).png b/fr/.gitbook/assets/image (299).png new file mode 100644 index 00000000..9c6a156d Binary files /dev/null and b/fr/.gitbook/assets/image (299).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..d3bdadc0 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git 
a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..8aac93ff Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..032405c2 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..faa43441 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..b1ae287b Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..7c8b4107 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git 
a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..95fc122a Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..02226006 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..ff4b32e5 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..dd3ab103 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..e215cc1f Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..6b190a07 Binary files /dev/null and b/fr/.gitbook/assets/image (3) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..31b162f2 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..cd73ca2a Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..27f2db43 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..625903f2 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..61bc4530 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..429682af Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) 
(1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..fead7383 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..03663655 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..f303c3dc Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..1e4bde69 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..c4d5738f Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..7433bbac Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..82a27cc7 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1).png new 
file mode 100644 index 00000000..2c72824b Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (1).png new file mode 100644 index 00000000..9ae78a3f Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..1d0e3797 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (1) (2).png new file mode 100644 index 00000000..09dafdad Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (1).png new file mode 100644 index 00000000..f20d76bb Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (3) (1) (1) (2) (1).png new file mode 100644 index 00000000..6dbc792d Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (1) (2).png new file mode 100644 index 00000000..1c7ee1dd Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (1).png b/fr/.gitbook/assets/image (3) (1) (1).png new file mode 100644 index 00000000..d975a7a2 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (1) (2).png b/fr/.gitbook/assets/image (3) (1) (2).png new file mode 100644 index 00000000..42ec9997 Binary files /dev/null and b/fr/.gitbook/assets/image 
(3) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (1).png b/fr/.gitbook/assets/image (3) (1).png new file mode 100644 index 00000000..c46f0eea Binary files /dev/null and b/fr/.gitbook/assets/image (3) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (2).png b/fr/.gitbook/assets/image (3) (2).png new file mode 100644 index 00000000..245724ff Binary files /dev/null and b/fr/.gitbook/assets/image (3) (2).png differ diff --git a/fr/.gitbook/assets/image (3) (3) (1) (1).png b/fr/.gitbook/assets/image (3) (3) (1) (1).png new file mode 100644 index 00000000..8fba76d4 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (3) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (3) (1).png b/fr/.gitbook/assets/image (3) (3) (1).png new file mode 100644 index 00000000..05ee968d Binary files /dev/null and b/fr/.gitbook/assets/image (3) (3) (1).png differ diff --git a/fr/.gitbook/assets/image (3) (3).png b/fr/.gitbook/assets/image (3) (3).png new file mode 100644 index 00000000..23d23a73 Binary files /dev/null and b/fr/.gitbook/assets/image (3) (3).png differ diff --git a/fr/.gitbook/assets/image (3) (4).png b/fr/.gitbook/assets/image (3) (4).png new file mode 100644 index 00000000..ce87876b Binary files /dev/null and b/fr/.gitbook/assets/image (3) (4).png differ diff --git a/fr/.gitbook/assets/image (3).png b/fr/.gitbook/assets/image (3).png new file mode 100644 index 00000000..0c783233 Binary files /dev/null and b/fr/.gitbook/assets/image (3).png differ diff --git a/fr/.gitbook/assets/image (30) (1).png b/fr/.gitbook/assets/image (30) (1).png new file mode 100644 index 00000000..c6eed90f Binary files /dev/null and b/fr/.gitbook/assets/image (30) (1).png differ diff --git a/fr/.gitbook/assets/image (30).png b/fr/.gitbook/assets/image (30).png new file mode 100644 index 00000000..8a874854 Binary files /dev/null and b/fr/.gitbook/assets/image (30).png differ diff --git a/fr/.gitbook/assets/image (300).png b/fr/.gitbook/assets/image (300).png 
new file mode 100644 index 00000000..7efb9fac Binary files /dev/null and b/fr/.gitbook/assets/image (300).png differ diff --git a/fr/.gitbook/assets/image (301).png b/fr/.gitbook/assets/image (301).png new file mode 100644 index 00000000..f10e797d Binary files /dev/null and b/fr/.gitbook/assets/image (301).png differ diff --git a/fr/.gitbook/assets/image (302).png b/fr/.gitbook/assets/image (302).png new file mode 100644 index 00000000..2b50a563 Binary files /dev/null and b/fr/.gitbook/assets/image (302).png differ diff --git a/fr/.gitbook/assets/image (303).png b/fr/.gitbook/assets/image (303).png new file mode 100644 index 00000000..908ade0f Binary files /dev/null and b/fr/.gitbook/assets/image (303).png differ diff --git a/fr/.gitbook/assets/image (304).png b/fr/.gitbook/assets/image (304).png new file mode 100644 index 00000000..c7cfe6fc Binary files /dev/null and b/fr/.gitbook/assets/image (304).png differ diff --git a/fr/.gitbook/assets/image (305).png b/fr/.gitbook/assets/image (305).png new file mode 100644 index 00000000..19cd6739 Binary files /dev/null and b/fr/.gitbook/assets/image (305).png differ diff --git a/fr/.gitbook/assets/image (306).png b/fr/.gitbook/assets/image (306).png new file mode 100644 index 00000000..e5b6ebcf Binary files /dev/null and b/fr/.gitbook/assets/image (306).png differ diff --git a/fr/.gitbook/assets/image (307).png b/fr/.gitbook/assets/image (307).png new file mode 100644 index 00000000..28a07fb8 Binary files /dev/null and b/fr/.gitbook/assets/image (307).png differ diff --git a/fr/.gitbook/assets/image (308).png b/fr/.gitbook/assets/image (308).png new file mode 100644 index 00000000..ff84f388 Binary files /dev/null and b/fr/.gitbook/assets/image (308).png differ diff --git a/fr/.gitbook/assets/image (309).png b/fr/.gitbook/assets/image (309).png new file mode 100644 index 00000000..b32ac009 Binary files /dev/null and b/fr/.gitbook/assets/image (309).png differ diff --git a/fr/.gitbook/assets/image (31).png 
b/fr/.gitbook/assets/image (31).png new file mode 100644 index 00000000..7470f0c2 Binary files /dev/null and b/fr/.gitbook/assets/image (31).png differ diff --git a/fr/.gitbook/assets/image (310).png b/fr/.gitbook/assets/image (310).png new file mode 100644 index 00000000..b32ac009 Binary files /dev/null and b/fr/.gitbook/assets/image (310).png differ diff --git a/fr/.gitbook/assets/image (311).png b/fr/.gitbook/assets/image (311).png new file mode 100644 index 00000000..ff3d6da3 Binary files /dev/null and b/fr/.gitbook/assets/image (311).png differ diff --git a/fr/.gitbook/assets/image (312).png b/fr/.gitbook/assets/image (312).png new file mode 100644 index 00000000..adf91df9 Binary files /dev/null and b/fr/.gitbook/assets/image (312).png differ diff --git a/fr/.gitbook/assets/image (313).png b/fr/.gitbook/assets/image (313).png new file mode 100644 index 00000000..13560ca1 Binary files /dev/null and b/fr/.gitbook/assets/image (313).png differ diff --git a/fr/.gitbook/assets/image (314).png b/fr/.gitbook/assets/image (314).png new file mode 100644 index 00000000..95b21316 Binary files /dev/null and b/fr/.gitbook/assets/image (314).png differ diff --git a/fr/.gitbook/assets/image (315).png b/fr/.gitbook/assets/image (315).png new file mode 100644 index 00000000..99cede57 Binary files /dev/null and b/fr/.gitbook/assets/image (315).png differ diff --git a/fr/.gitbook/assets/image (316).png b/fr/.gitbook/assets/image (316).png new file mode 100644 index 00000000..84b81c8f Binary files /dev/null and b/fr/.gitbook/assets/image (316).png differ diff --git a/fr/.gitbook/assets/image (317).png b/fr/.gitbook/assets/image (317).png new file mode 100644 index 00000000..3c6b86de Binary files /dev/null and b/fr/.gitbook/assets/image (317).png differ diff --git a/fr/.gitbook/assets/image (318).png b/fr/.gitbook/assets/image (318).png new file mode 100644 index 00000000..9ae53e59 Binary files /dev/null and b/fr/.gitbook/assets/image (318).png differ diff --git 
a/fr/.gitbook/assets/image (319).png b/fr/.gitbook/assets/image (319).png new file mode 100644 index 00000000..adf41819 Binary files /dev/null and b/fr/.gitbook/assets/image (319).png differ diff --git a/fr/.gitbook/assets/image (32).png b/fr/.gitbook/assets/image (32).png new file mode 100644 index 00000000..840d2d2c Binary files /dev/null and b/fr/.gitbook/assets/image (32).png differ diff --git a/fr/.gitbook/assets/image (320).png b/fr/.gitbook/assets/image (320).png new file mode 100644 index 00000000..44756a08 Binary files /dev/null and b/fr/.gitbook/assets/image (320).png differ diff --git a/fr/.gitbook/assets/image (321).png b/fr/.gitbook/assets/image (321).png new file mode 100644 index 00000000..cdcc8d35 Binary files /dev/null and b/fr/.gitbook/assets/image (321).png differ diff --git a/fr/.gitbook/assets/image (322).png b/fr/.gitbook/assets/image (322).png new file mode 100644 index 00000000..ea0e075c Binary files /dev/null and b/fr/.gitbook/assets/image (322).png differ diff --git a/fr/.gitbook/assets/image (323).png b/fr/.gitbook/assets/image (323).png new file mode 100644 index 00000000..d04408b0 Binary files /dev/null and b/fr/.gitbook/assets/image (323).png differ diff --git a/fr/.gitbook/assets/image (324).png b/fr/.gitbook/assets/image (324).png new file mode 100644 index 00000000..a7b2c307 Binary files /dev/null and b/fr/.gitbook/assets/image (324).png differ diff --git a/fr/.gitbook/assets/image (325).png b/fr/.gitbook/assets/image (325).png new file mode 100644 index 00000000..051e1d4f Binary files /dev/null and b/fr/.gitbook/assets/image (325).png differ diff --git a/fr/.gitbook/assets/image (326).png b/fr/.gitbook/assets/image (326).png new file mode 100644 index 00000000..727bac73 Binary files /dev/null and b/fr/.gitbook/assets/image (326).png differ diff --git a/fr/.gitbook/assets/image (327).png b/fr/.gitbook/assets/image (327).png new file mode 100644 index 00000000..ec5ed9b8 Binary files /dev/null and b/fr/.gitbook/assets/image (327).png 
differ diff --git a/fr/.gitbook/assets/image (328).png b/fr/.gitbook/assets/image (328).png new file mode 100644 index 00000000..eb7052a4 Binary files /dev/null and b/fr/.gitbook/assets/image (328).png differ diff --git a/fr/.gitbook/assets/image (329).png b/fr/.gitbook/assets/image (329).png new file mode 100644 index 00000000..c593d5b8 Binary files /dev/null and b/fr/.gitbook/assets/image (329).png differ diff --git a/fr/.gitbook/assets/image (33).png b/fr/.gitbook/assets/image (33).png new file mode 100644 index 00000000..6e7bc1d4 Binary files /dev/null and b/fr/.gitbook/assets/image (33).png differ diff --git a/fr/.gitbook/assets/image (330).png b/fr/.gitbook/assets/image (330).png new file mode 100644 index 00000000..02ef696d Binary files /dev/null and b/fr/.gitbook/assets/image (330).png differ diff --git a/fr/.gitbook/assets/image (331).png b/fr/.gitbook/assets/image (331).png new file mode 100644 index 00000000..3da18717 Binary files /dev/null and b/fr/.gitbook/assets/image (331).png differ diff --git a/fr/.gitbook/assets/image (332).png b/fr/.gitbook/assets/image (332).png new file mode 100644 index 00000000..1b8da3c6 Binary files /dev/null and b/fr/.gitbook/assets/image (332).png differ diff --git a/fr/.gitbook/assets/image (333).png b/fr/.gitbook/assets/image (333).png new file mode 100644 index 00000000..d33634ce Binary files /dev/null and b/fr/.gitbook/assets/image (333).png differ diff --git a/fr/.gitbook/assets/image (334).png b/fr/.gitbook/assets/image (334).png new file mode 100644 index 00000000..c974fd73 Binary files /dev/null and b/fr/.gitbook/assets/image (334).png differ diff --git a/fr/.gitbook/assets/image (335).png b/fr/.gitbook/assets/image (335).png new file mode 100644 index 00000000..3c1000d5 Binary files /dev/null and b/fr/.gitbook/assets/image (335).png differ diff --git a/fr/.gitbook/assets/image (336).png b/fr/.gitbook/assets/image (336).png new file mode 100644 index 00000000..1364c9fa Binary files /dev/null and 
b/fr/.gitbook/assets/image (336).png differ diff --git a/fr/.gitbook/assets/image (337).png b/fr/.gitbook/assets/image (337).png new file mode 100644 index 00000000..8d085117 Binary files /dev/null and b/fr/.gitbook/assets/image (337).png differ diff --git a/fr/.gitbook/assets/image (34).png b/fr/.gitbook/assets/image (34).png new file mode 100644 index 00000000..b433207a Binary files /dev/null and b/fr/.gitbook/assets/image (34).png differ diff --git a/fr/.gitbook/assets/image (35).png b/fr/.gitbook/assets/image (35).png new file mode 100644 index 00000000..76fc4d4f Binary files /dev/null and b/fr/.gitbook/assets/image (35).png differ diff --git a/fr/.gitbook/assets/image (36) (1).png b/fr/.gitbook/assets/image (36) (1).png new file mode 100644 index 00000000..08d7ca74 Binary files /dev/null and b/fr/.gitbook/assets/image (36) (1).png differ diff --git a/fr/.gitbook/assets/image (36).png b/fr/.gitbook/assets/image (36).png new file mode 100644 index 00000000..6b154378 Binary files /dev/null and b/fr/.gitbook/assets/image (36).png differ diff --git a/fr/.gitbook/assets/image (37) (1).png b/fr/.gitbook/assets/image (37) (1).png new file mode 100644 index 00000000..071dd61c Binary files /dev/null and b/fr/.gitbook/assets/image (37) (1).png differ diff --git a/fr/.gitbook/assets/image (37).png b/fr/.gitbook/assets/image (37).png new file mode 100644 index 00000000..b08c4ddc Binary files /dev/null and b/fr/.gitbook/assets/image (37).png differ diff --git a/fr/.gitbook/assets/image (38) (1) (1).png b/fr/.gitbook/assets/image (38) (1) (1).png new file mode 100644 index 00000000..5c6cdf10 Binary files /dev/null and b/fr/.gitbook/assets/image (38) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (38) (1).png b/fr/.gitbook/assets/image (38) (1).png new file mode 100644 index 00000000..32c0f035 Binary files /dev/null and b/fr/.gitbook/assets/image (38) (1).png differ diff --git a/fr/.gitbook/assets/image (38).png b/fr/.gitbook/assets/image (38).png new file mode 
100644 index 00000000..31db2749 Binary files /dev/null and b/fr/.gitbook/assets/image (38).png differ diff --git a/fr/.gitbook/assets/image (39) (1).png b/fr/.gitbook/assets/image (39) (1).png new file mode 100644 index 00000000..dde5e942 Binary files /dev/null and b/fr/.gitbook/assets/image (39) (1).png differ diff --git a/fr/.gitbook/assets/image (39).png b/fr/.gitbook/assets/image (39).png new file mode 100644 index 00000000..2400c5dc Binary files /dev/null and b/fr/.gitbook/assets/image (39).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..fff8514b Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..82fb7ff9 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ea118f46 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..19dfb487 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff 
--git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..ba25dada Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..3578f9e1 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..f38c3016 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..f6ccab6a Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..6bc3f51c Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..82d8ba36 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ 
diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..cc1e6972 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..920b8945 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..3092272d Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c0edca89 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..8db0e191 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..1ab0212d Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..bada2fe6 Binary files 
/dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..c3893a5a Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..2d1a0c10 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..5ce488ab Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..9d857bcd Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..370d859c Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..6baf30b8 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (3).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (3).png new file mode 100644 index 00000000..05c11c7d Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1) (3).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1) 
(1).png b/fr/.gitbook/assets/image (4) (1) (1) (1) (1).png new file mode 100644 index 00000000..98071c51 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1) (1).png new file mode 100644 index 00000000..c1bb3365 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (1).png new file mode 100644 index 00000000..75b90e93 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (4) (1) (2) (1) (1).png new file mode 100644 index 00000000..5d0c0141 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (2) (1).png b/fr/.gitbook/assets/image (4) (1) (2) (1).png new file mode 100644 index 00000000..54e7a64e Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (2).png b/fr/.gitbook/assets/image (4) (1) (2).png new file mode 100644 index 00000000..f128d34e Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (4) (1) (3).png b/fr/.gitbook/assets/image (4) (1) (3).png new file mode 100644 index 00000000..bd572796 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1) (3).png differ diff --git a/fr/.gitbook/assets/image (4) (1).png b/fr/.gitbook/assets/image (4) (1).png new file mode 100644 index 00000000..c0901edb Binary files /dev/null and b/fr/.gitbook/assets/image (4) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (2).png b/fr/.gitbook/assets/image (4) (2).png new file mode 100644 index 00000000..87f7d80f Binary files /dev/null and b/fr/.gitbook/assets/image (4) (2).png differ diff --git 
a/fr/.gitbook/assets/image (4) (3) (1).png b/fr/.gitbook/assets/image (4) (3) (1).png new file mode 100644 index 00000000..d4aa4fe6 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (3) (1).png differ diff --git a/fr/.gitbook/assets/image (4) (3).png b/fr/.gitbook/assets/image (4) (3).png new file mode 100644 index 00000000..c8342008 Binary files /dev/null and b/fr/.gitbook/assets/image (4) (3).png differ diff --git a/fr/.gitbook/assets/image (4).png b/fr/.gitbook/assets/image (4).png new file mode 100644 index 00000000..010e4714 Binary files /dev/null and b/fr/.gitbook/assets/image (4).png differ diff --git a/fr/.gitbook/assets/image (40).png b/fr/.gitbook/assets/image (40).png new file mode 100644 index 00000000..a32cfba1 Binary files /dev/null and b/fr/.gitbook/assets/image (40).png differ diff --git a/fr/.gitbook/assets/image (41).png b/fr/.gitbook/assets/image (41).png new file mode 100644 index 00000000..3c6a0e8f Binary files /dev/null and b/fr/.gitbook/assets/image (41).png differ diff --git a/fr/.gitbook/assets/image (42).png b/fr/.gitbook/assets/image (42).png new file mode 100644 index 00000000..f3bacffc Binary files /dev/null and b/fr/.gitbook/assets/image (42).png differ diff --git a/fr/.gitbook/assets/image (43).png b/fr/.gitbook/assets/image (43).png new file mode 100644 index 00000000..53140bf6 Binary files /dev/null and b/fr/.gitbook/assets/image (43).png differ diff --git a/fr/.gitbook/assets/image (44).png b/fr/.gitbook/assets/image (44).png new file mode 100644 index 00000000..37b86f5e Binary files /dev/null and b/fr/.gitbook/assets/image (44).png differ diff --git a/fr/.gitbook/assets/image (45).png b/fr/.gitbook/assets/image (45).png new file mode 100644 index 00000000..271be4f5 Binary files /dev/null and b/fr/.gitbook/assets/image (45).png differ diff --git a/fr/.gitbook/assets/image (46).png b/fr/.gitbook/assets/image (46).png new file mode 100644 index 00000000..a817d39d Binary files /dev/null and b/fr/.gitbook/assets/image (46).png 
differ diff --git a/fr/.gitbook/assets/image (47).png b/fr/.gitbook/assets/image (47).png new file mode 100644 index 00000000..98ee4c12 Binary files /dev/null and b/fr/.gitbook/assets/image (47).png differ diff --git a/fr/.gitbook/assets/image (48).png b/fr/.gitbook/assets/image (48).png new file mode 100644 index 00000000..4f264640 Binary files /dev/null and b/fr/.gitbook/assets/image (48).png differ diff --git a/fr/.gitbook/assets/image (49).png b/fr/.gitbook/assets/image (49).png new file mode 100644 index 00000000..c57ce0db Binary files /dev/null and b/fr/.gitbook/assets/image (49).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..8b3f22d1 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..7327ba99 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..6798fc57 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png new file mode 100644 index 00000000..e58fad39 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1) (1).png differ diff --git 
a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..2e754028 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..f1c0f754 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..ac1b4de9 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..a99621b3 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..35e23324 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..7cb303e7 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) 
(1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..f38c3016 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..31ba94ab Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..9d9da9cb Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..b07c2549 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ac8d8708 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..dc697fd3 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..ae46289c Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) 
(1) (1) (1).png new file mode 100644 index 00000000..82a62d3c Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..fc10c2c7 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..63a4b868 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1) (1).png new file mode 100644 index 00000000..9d857bcd Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1) (1).png new file mode 100644 index 00000000..c62ffad2 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (1) (2).png new file mode 100644 index 00000000..ff87b170 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (1).png b/fr/.gitbook/assets/image (5) (1) (1).png new file mode 100644 index 00000000..51329fec Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (1) (2).png b/fr/.gitbook/assets/image (5) (1) (2).png new file mode 100644 index 00000000..5cf127ec Binary files /dev/null and b/fr/.gitbook/assets/image (5) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (1).png b/fr/.gitbook/assets/image (5) (1).png new file mode 100644 index 00000000..8b69ddc3 Binary files /dev/null and 
b/fr/.gitbook/assets/image (5) (1).png differ diff --git a/fr/.gitbook/assets/image (5) (2).png b/fr/.gitbook/assets/image (5) (2).png new file mode 100644 index 00000000..85a77ebd Binary files /dev/null and b/fr/.gitbook/assets/image (5) (2).png differ diff --git a/fr/.gitbook/assets/image (5) (3).png b/fr/.gitbook/assets/image (5) (3).png new file mode 100644 index 00000000..4b6f3caf Binary files /dev/null and b/fr/.gitbook/assets/image (5) (3).png differ diff --git a/fr/.gitbook/assets/image (5) (4).png b/fr/.gitbook/assets/image (5) (4).png new file mode 100644 index 00000000..c2fe2124 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (4).png differ diff --git a/fr/.gitbook/assets/image (5) (5).png b/fr/.gitbook/assets/image (5) (5).png new file mode 100644 index 00000000..a67be3c7 Binary files /dev/null and b/fr/.gitbook/assets/image (5) (5).png differ diff --git a/fr/.gitbook/assets/image (5) (6).png b/fr/.gitbook/assets/image (5) (6).png new file mode 100644 index 00000000..8cae20db Binary files /dev/null and b/fr/.gitbook/assets/image (5) (6).png differ diff --git a/fr/.gitbook/assets/image (5).png b/fr/.gitbook/assets/image (5).png new file mode 100644 index 00000000..42dfa78e Binary files /dev/null and b/fr/.gitbook/assets/image (5).png differ diff --git a/fr/.gitbook/assets/image (50).png b/fr/.gitbook/assets/image (50).png new file mode 100644 index 00000000..ee42d003 Binary files /dev/null and b/fr/.gitbook/assets/image (50).png differ diff --git a/fr/.gitbook/assets/image (51).png b/fr/.gitbook/assets/image (51).png new file mode 100644 index 00000000..8ab2351e Binary files /dev/null and b/fr/.gitbook/assets/image (51).png differ diff --git a/fr/.gitbook/assets/image (52).png b/fr/.gitbook/assets/image (52).png new file mode 100644 index 00000000..4cb1b4a9 Binary files /dev/null and b/fr/.gitbook/assets/image (52).png differ diff --git a/fr/.gitbook/assets/image (53).png b/fr/.gitbook/assets/image (53).png new file mode 100644 index 
00000000..98f95c27 Binary files /dev/null and b/fr/.gitbook/assets/image (53).png differ diff --git a/fr/.gitbook/assets/image (54).png b/fr/.gitbook/assets/image (54).png new file mode 100644 index 00000000..73793197 Binary files /dev/null and b/fr/.gitbook/assets/image (54).png differ diff --git a/fr/.gitbook/assets/image (55).png b/fr/.gitbook/assets/image (55).png new file mode 100644 index 00000000..9633de50 Binary files /dev/null and b/fr/.gitbook/assets/image (55).png differ diff --git a/fr/.gitbook/assets/image (56).png b/fr/.gitbook/assets/image (56).png new file mode 100644 index 00000000..2d47d097 Binary files /dev/null and b/fr/.gitbook/assets/image (56).png differ diff --git a/fr/.gitbook/assets/image (57).png b/fr/.gitbook/assets/image (57).png new file mode 100644 index 00000000..e4b4c3f9 Binary files /dev/null and b/fr/.gitbook/assets/image (57).png differ diff --git a/fr/.gitbook/assets/image (58).png b/fr/.gitbook/assets/image (58).png new file mode 100644 index 00000000..383cb564 Binary files /dev/null and b/fr/.gitbook/assets/image (58).png differ diff --git a/fr/.gitbook/assets/image (59).png b/fr/.gitbook/assets/image (59).png new file mode 100644 index 00000000..4e8dd10a Binary files /dev/null and b/fr/.gitbook/assets/image (59).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..6730b1b9 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..4996b1d0 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) 
(1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..568fbe6b Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c8568fc8 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..ba6f5153 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ef6d8157 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..52dbd74e Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..9c59d546 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (2).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (2).png new file mode 100644 index 00000000..631f5d69 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2) (2).png differ diff --git 
a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..95cd6e1d Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..cdfbe504 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..619f2a15 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..ae521cdd Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..c8b0b3f3 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (3).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (3).png new file mode 100644 index 00000000..e7a404eb Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1) (3).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (1).png new file mode 100644 index 00000000..8688b58c Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (6) (1) (1) (1) (2).png new file mode 100644 index 00000000..d2b2b36b Binary files /dev/null 
and b/fr/.gitbook/assets/image (6) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1) (1).png new file mode 100644 index 00000000..4a205bdb Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (1).png b/fr/.gitbook/assets/image (6) (1) (1).png new file mode 100644 index 00000000..8650e278 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (1) (2).png b/fr/.gitbook/assets/image (6) (1) (2).png new file mode 100644 index 00000000..a9a62b0c Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (6) (1).png b/fr/.gitbook/assets/image (6) (1).png new file mode 100644 index 00000000..efaeecc8 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (1).png differ diff --git a/fr/.gitbook/assets/image (6) (2).png b/fr/.gitbook/assets/image (6) (2).png new file mode 100644 index 00000000..d7617d21 Binary files /dev/null and b/fr/.gitbook/assets/image (6) (2).png differ diff --git a/fr/.gitbook/assets/image (6) (3).png b/fr/.gitbook/assets/image (6) (3).png new file mode 100644 index 00000000..cb24c93d Binary files /dev/null and b/fr/.gitbook/assets/image (6) (3).png differ diff --git a/fr/.gitbook/assets/image (6).png b/fr/.gitbook/assets/image (6).png new file mode 100644 index 00000000..358b54b1 Binary files /dev/null and b/fr/.gitbook/assets/image (6).png differ diff --git a/fr/.gitbook/assets/image (60).png b/fr/.gitbook/assets/image (60).png new file mode 100644 index 00000000..b099b007 Binary files /dev/null and b/fr/.gitbook/assets/image (60).png differ diff --git a/fr/.gitbook/assets/image (61).png b/fr/.gitbook/assets/image (61).png new file mode 100644 index 00000000..79ffbded Binary files /dev/null and b/fr/.gitbook/assets/image (61).png differ diff --git a/fr/.gitbook/assets/image (62).png 
b/fr/.gitbook/assets/image (62).png new file mode 100644 index 00000000..97b8fa60 Binary files /dev/null and b/fr/.gitbook/assets/image (62).png differ diff --git a/fr/.gitbook/assets/image (63).png b/fr/.gitbook/assets/image (63).png new file mode 100644 index 00000000..7bb5754a Binary files /dev/null and b/fr/.gitbook/assets/image (63).png differ diff --git a/fr/.gitbook/assets/image (64).png b/fr/.gitbook/assets/image (64).png new file mode 100644 index 00000000..8782e07d Binary files /dev/null and b/fr/.gitbook/assets/image (64).png differ diff --git a/fr/.gitbook/assets/image (65).png b/fr/.gitbook/assets/image (65).png new file mode 100644 index 00000000..6d2342f8 Binary files /dev/null and b/fr/.gitbook/assets/image (65).png differ diff --git a/fr/.gitbook/assets/image (66).png b/fr/.gitbook/assets/image (66).png new file mode 100644 index 00000000..675182ef Binary files /dev/null and b/fr/.gitbook/assets/image (66).png differ diff --git a/fr/.gitbook/assets/image (67).png b/fr/.gitbook/assets/image (67).png new file mode 100644 index 00000000..049b1740 Binary files /dev/null and b/fr/.gitbook/assets/image (67).png differ diff --git a/fr/.gitbook/assets/image (68).png b/fr/.gitbook/assets/image (68).png new file mode 100644 index 00000000..b494b784 Binary files /dev/null and b/fr/.gitbook/assets/image (68).png differ diff --git a/fr/.gitbook/assets/image (69).png b/fr/.gitbook/assets/image (69).png new file mode 100644 index 00000000..cb7a995d Binary files /dev/null and b/fr/.gitbook/assets/image (69).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..5e416f0e Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (1).png new 
file mode 100644 index 00000000..a911824f Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..4c374bd1 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..f3d7516a Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..333726dc Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..cdfbe504 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..98d15ed2 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..3af481de Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..ec7c36fe Binary files 
/dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (1).png new file mode 100644 index 00000000..2d9376e5 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (7) (1) (1) (1) (2).png new file mode 100644 index 00000000..730d7732 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1) (1).png new file mode 100644 index 00000000..040b67cd Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (1).png b/fr/.gitbook/assets/image (7) (1) (1).png new file mode 100644 index 00000000..95f6aa3f Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (1) (2).png b/fr/.gitbook/assets/image (7) (1) (2).png new file mode 100644 index 00000000..c1a9d1ba Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (7) (1).png b/fr/.gitbook/assets/image (7) (1).png new file mode 100644 index 00000000..6b39bacd Binary files /dev/null and b/fr/.gitbook/assets/image (7) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (2) (1).png b/fr/.gitbook/assets/image (7) (2) (1).png new file mode 100644 index 00000000..c0cb1668 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (7) (2) (2).png b/fr/.gitbook/assets/image (7) (2) (2).png new file mode 100644 index 00000000..750a4dec Binary files /dev/null and b/fr/.gitbook/assets/image (7) (2) (2).png differ diff --git a/fr/.gitbook/assets/image (7) (2).png b/fr/.gitbook/assets/image (7) (2).png new file mode 100644 index 
00000000..b2590f89 Binary files /dev/null and b/fr/.gitbook/assets/image (7) (2).png differ diff --git a/fr/.gitbook/assets/image (7).png b/fr/.gitbook/assets/image (7).png new file mode 100644 index 00000000..b9804240 Binary files /dev/null and b/fr/.gitbook/assets/image (7).png differ diff --git a/fr/.gitbook/assets/image (70).png b/fr/.gitbook/assets/image (70).png new file mode 100644 index 00000000..a068612f Binary files /dev/null and b/fr/.gitbook/assets/image (70).png differ diff --git a/fr/.gitbook/assets/image (71).png b/fr/.gitbook/assets/image (71).png new file mode 100644 index 00000000..60465c5a Binary files /dev/null and b/fr/.gitbook/assets/image (71).png differ diff --git a/fr/.gitbook/assets/image (72).png b/fr/.gitbook/assets/image (72).png new file mode 100644 index 00000000..18e0650a Binary files /dev/null and b/fr/.gitbook/assets/image (72).png differ diff --git a/fr/.gitbook/assets/image (73).png b/fr/.gitbook/assets/image (73).png new file mode 100644 index 00000000..eb313caa Binary files /dev/null and b/fr/.gitbook/assets/image (73).png differ diff --git a/fr/.gitbook/assets/image (74).png b/fr/.gitbook/assets/image (74).png new file mode 100644 index 00000000..c4486f67 Binary files /dev/null and b/fr/.gitbook/assets/image (74).png differ diff --git a/fr/.gitbook/assets/image (75).png b/fr/.gitbook/assets/image (75).png new file mode 100644 index 00000000..89738bd0 Binary files /dev/null and b/fr/.gitbook/assets/image (75).png differ diff --git a/fr/.gitbook/assets/image (76).png b/fr/.gitbook/assets/image (76).png new file mode 100644 index 00000000..822b64c6 Binary files /dev/null and b/fr/.gitbook/assets/image (76).png differ diff --git a/fr/.gitbook/assets/image (77).png b/fr/.gitbook/assets/image (77).png new file mode 100644 index 00000000..96a4803b Binary files /dev/null and b/fr/.gitbook/assets/image (77).png differ diff --git a/fr/.gitbook/assets/image (78).png b/fr/.gitbook/assets/image (78).png new file mode 100644 index 
00000000..b94f78c1 Binary files /dev/null and b/fr/.gitbook/assets/image (78).png differ diff --git a/fr/.gitbook/assets/image (79).png b/fr/.gitbook/assets/image (79).png new file mode 100644 index 00000000..d5a07c4d Binary files /dev/null and b/fr/.gitbook/assets/image (79).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..b2bf4e1d Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..56ff6078 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..108fccd8 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..c26dd6ec Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..2163a982 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 
00000000..f4815f46 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..061fa98a Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..9f67744b Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..635f5bff Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..17c51cda Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..da09887a Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..72a2fbfb Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..f1a5558c Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1) 
(2).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (1).png new file mode 100644 index 00000000..820fa75f Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (8) (1) (1) (1) (2).png new file mode 100644 index 00000000..fc799e6a Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1) (1).png new file mode 100644 index 00000000..b95af4d0 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1) (1).png b/fr/.gitbook/assets/image (8) (1) (1).png new file mode 100644 index 00000000..d71d4bf4 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (1).png b/fr/.gitbook/assets/image (8) (1).png new file mode 100644 index 00000000..cb2296c0 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (2) (1) (1).png b/fr/.gitbook/assets/image (8) (2) (1) (1).png new file mode 100644 index 00000000..9721de0b Binary files /dev/null and b/fr/.gitbook/assets/image (8) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (2) (1).png b/fr/.gitbook/assets/image (8) (2) (1).png new file mode 100644 index 00000000..c715f651 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (8) (2).png b/fr/.gitbook/assets/image (8) (2).png new file mode 100644 index 00000000..d57c0b07 Binary files /dev/null and b/fr/.gitbook/assets/image (8) (2).png differ diff --git a/fr/.gitbook/assets/image (8).png b/fr/.gitbook/assets/image (8).png new file mode 100644 index 00000000..8c466dad Binary files /dev/null and 
b/fr/.gitbook/assets/image (8).png differ diff --git a/fr/.gitbook/assets/image (80).png b/fr/.gitbook/assets/image (80).png new file mode 100644 index 00000000..b01b0ca3 Binary files /dev/null and b/fr/.gitbook/assets/image (80).png differ diff --git a/fr/.gitbook/assets/image (81).png b/fr/.gitbook/assets/image (81).png new file mode 100644 index 00000000..54665a31 Binary files /dev/null and b/fr/.gitbook/assets/image (81).png differ diff --git a/fr/.gitbook/assets/image (82).png b/fr/.gitbook/assets/image (82).png new file mode 100644 index 00000000..c668dd18 Binary files /dev/null and b/fr/.gitbook/assets/image (82).png differ diff --git a/fr/.gitbook/assets/image (83).png b/fr/.gitbook/assets/image (83).png new file mode 100644 index 00000000..bbb0da47 Binary files /dev/null and b/fr/.gitbook/assets/image (83).png differ diff --git a/fr/.gitbook/assets/image (84).png b/fr/.gitbook/assets/image (84).png new file mode 100644 index 00000000..a7f2b684 Binary files /dev/null and b/fr/.gitbook/assets/image (84).png differ diff --git a/fr/.gitbook/assets/image (85).png b/fr/.gitbook/assets/image (85).png new file mode 100644 index 00000000..1e867fb8 Binary files /dev/null and b/fr/.gitbook/assets/image (85).png differ diff --git a/fr/.gitbook/assets/image (86).png b/fr/.gitbook/assets/image (86).png new file mode 100644 index 00000000..1fa4bdf9 Binary files /dev/null and b/fr/.gitbook/assets/image (86).png differ diff --git a/fr/.gitbook/assets/image (87).png b/fr/.gitbook/assets/image (87).png new file mode 100644 index 00000000..2f020cde Binary files /dev/null and b/fr/.gitbook/assets/image (87).png differ diff --git a/fr/.gitbook/assets/image (88).png b/fr/.gitbook/assets/image (88).png new file mode 100644 index 00000000..b2716d71 Binary files /dev/null and b/fr/.gitbook/assets/image (88).png differ diff --git a/fr/.gitbook/assets/image (89).png b/fr/.gitbook/assets/image (89).png new file mode 100644 index 00000000..dcb2d13e Binary files /dev/null and 
b/fr/.gitbook/assets/image (89).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..2a56c02c Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..439ae037 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..da16bd74 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..0961390e Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..388b22a7 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..bc01445f Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1).png new file mode 100644 index 00000000..e403f932 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (1).png differ diff 
--git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 00000000..6cf7ae39 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..1484e2b0 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2).png new file mode 100644 index 00000000..68b63505 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (3).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (3).png new file mode 100644 index 00000000..9b90b2a4 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1) (3).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (1).png new file mode 100644 index 00000000..e12e56ef Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (2) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (2) (1).png new file mode 100644 index 00000000..dc53fe76 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (2).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (2).png new file mode 100644 index 00000000..28393fd4 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1) (2).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1) (3).png b/fr/.gitbook/assets/image (9) (1) (1) (1) (3).png new file mode 100644 index 00000000..2643724f Binary files /dev/null and 
b/fr/.gitbook/assets/image (9) (1) (1) (1) (3).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1) (1).png new file mode 100644 index 00000000..f4f59da1 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1) (1).png b/fr/.gitbook/assets/image (9) (1) (1).png new file mode 100644 index 00000000..701b82d1 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (1).png b/fr/.gitbook/assets/image (9) (1).png new file mode 100644 index 00000000..3e08d6f8 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (2) (1).png b/fr/.gitbook/assets/image (9) (2) (1).png new file mode 100644 index 00000000..2bc45dca Binary files /dev/null and b/fr/.gitbook/assets/image (9) (2) (1).png differ diff --git a/fr/.gitbook/assets/image (9) (2).png b/fr/.gitbook/assets/image (9) (2).png new file mode 100644 index 00000000..d660c992 Binary files /dev/null and b/fr/.gitbook/assets/image (9) (2).png differ diff --git a/fr/.gitbook/assets/image (9).png b/fr/.gitbook/assets/image (9).png new file mode 100644 index 00000000..68de06c9 Binary files /dev/null and b/fr/.gitbook/assets/image (9).png differ diff --git a/fr/.gitbook/assets/image (90).png b/fr/.gitbook/assets/image (90).png new file mode 100644 index 00000000..92235f44 Binary files /dev/null and b/fr/.gitbook/assets/image (90).png differ diff --git a/fr/.gitbook/assets/image (91).png b/fr/.gitbook/assets/image (91).png new file mode 100644 index 00000000..7c80698f Binary files /dev/null and b/fr/.gitbook/assets/image (91).png differ diff --git a/fr/.gitbook/assets/image (92).png b/fr/.gitbook/assets/image (92).png new file mode 100644 index 00000000..38b19459 Binary files /dev/null and b/fr/.gitbook/assets/image (92).png differ diff --git a/fr/.gitbook/assets/image (93).png 
b/fr/.gitbook/assets/image (93).png new file mode 100644 index 00000000..e6d72c4b Binary files /dev/null and b/fr/.gitbook/assets/image (93).png differ diff --git a/fr/.gitbook/assets/image (94).png b/fr/.gitbook/assets/image (94).png new file mode 100644 index 00000000..5c3e1805 Binary files /dev/null and b/fr/.gitbook/assets/image (94).png differ diff --git a/fr/.gitbook/assets/image (95) (1).png b/fr/.gitbook/assets/image (95) (1).png new file mode 100644 index 00000000..cdf7bcfa Binary files /dev/null and b/fr/.gitbook/assets/image (95) (1).png differ diff --git a/fr/.gitbook/assets/image (95).png b/fr/.gitbook/assets/image (95).png new file mode 100644 index 00000000..30c2d25c Binary files /dev/null and b/fr/.gitbook/assets/image (95).png differ diff --git a/fr/.gitbook/assets/image (96).png b/fr/.gitbook/assets/image (96).png new file mode 100644 index 00000000..431b030e Binary files /dev/null and b/fr/.gitbook/assets/image (96).png differ diff --git a/fr/.gitbook/assets/image (97).png b/fr/.gitbook/assets/image (97).png new file mode 100644 index 00000000..131ee883 Binary files /dev/null and b/fr/.gitbook/assets/image (97).png differ diff --git a/fr/.gitbook/assets/image (98).png b/fr/.gitbook/assets/image (98).png new file mode 100644 index 00000000..a7db7aac Binary files /dev/null and b/fr/.gitbook/assets/image (98).png differ diff --git a/fr/.gitbook/assets/image (99).png b/fr/.gitbook/assets/image (99).png new file mode 100644 index 00000000..0460f1bc Binary files /dev/null and b/fr/.gitbook/assets/image (99).png differ diff --git a/fr/.gitbook/assets/image.png b/fr/.gitbook/assets/image.png new file mode 100644 index 00000000..8c7a44f8 Binary files /dev/null and b/fr/.gitbook/assets/image.png differ diff --git a/fr/.gitbook/assets/image_airtable (1).png b/fr/.gitbook/assets/image_airtable (1).png new file mode 100644 index 00000000..f32ff749 Binary files /dev/null and b/fr/.gitbook/assets/image_airtable (1).png differ diff --git 
a/fr/.gitbook/assets/image_airtable.png b/fr/.gitbook/assets/image_airtable.png new file mode 100644 index 00000000..f32ff749 Binary files /dev/null and b/fr/.gitbook/assets/image_airtable.png differ diff --git a/fr/.gitbook/assets/image_csv (1).png b/fr/.gitbook/assets/image_csv (1).png new file mode 100644 index 00000000..2815c268 Binary files /dev/null and b/fr/.gitbook/assets/image_csv (1).png differ diff --git a/fr/.gitbook/assets/image_csv.png b/fr/.gitbook/assets/image_csv.png new file mode 100644 index 00000000..2815c268 Binary files /dev/null and b/fr/.gitbook/assets/image_csv.png differ diff --git a/fr/.gitbook/assets/image_custom-loader (1).png b/fr/.gitbook/assets/image_custom-loader (1).png new file mode 100644 index 00000000..4a2cfcf7 Binary files /dev/null and b/fr/.gitbook/assets/image_custom-loader (1).png differ diff --git a/fr/.gitbook/assets/image_custom-loader.png b/fr/.gitbook/assets/image_custom-loader.png new file mode 100644 index 00000000..4a2cfcf7 Binary files /dev/null and b/fr/.gitbook/assets/image_custom-loader.png differ diff --git a/fr/.gitbook/assets/image_openAI (1).png b/fr/.gitbook/assets/image_openAI (1).png new file mode 100644 index 00000000..9ad11174 Binary files /dev/null and b/fr/.gitbook/assets/image_openAI (1).png differ diff --git a/fr/.gitbook/assets/image_openAI.png b/fr/.gitbook/assets/image_openAI.png new file mode 100644 index 00000000..9ad11174 Binary files /dev/null and b/fr/.gitbook/assets/image_openAI.png differ diff --git a/fr/.gitbook/assets/langwatch/langwatch-creds.png b/fr/.gitbook/assets/langwatch/langwatch-creds.png new file mode 100644 index 00000000..6bf0bdab Binary files /dev/null and b/fr/.gitbook/assets/langwatch/langwatch-creds.png differ diff --git a/fr/.gitbook/assets/langwatch/langwatch-input.png b/fr/.gitbook/assets/langwatch/langwatch-input.png new file mode 100644 index 00000000..01708d77 Binary files /dev/null and b/fr/.gitbook/assets/langwatch/langwatch-input.png differ diff --git 
a/fr/.gitbook/assets/langwatch/langwatch-screenshot.png b/fr/.gitbook/assets/langwatch/langwatch-screenshot.png new file mode 100644 index 00000000..e13553db Binary files /dev/null and b/fr/.gitbook/assets/langwatch/langwatch-screenshot.png differ diff --git a/fr/.gitbook/assets/longGIF.gif b/fr/.gitbook/assets/longGIF.gif new file mode 100644 index 00000000..0e51d24f Binary files /dev/null and b/fr/.gitbook/assets/longGIF.gif differ diff --git a/fr/.gitbook/assets/mas01.png b/fr/.gitbook/assets/mas01.png new file mode 100644 index 00000000..93eea934 Binary files /dev/null and b/fr/.gitbook/assets/mas01.png differ diff --git a/fr/.gitbook/assets/mas04.png b/fr/.gitbook/assets/mas04.png new file mode 100644 index 00000000..718db44c Binary files /dev/null and b/fr/.gitbook/assets/mas04.png differ diff --git a/fr/.gitbook/assets/mas05.png b/fr/.gitbook/assets/mas05.png new file mode 100644 index 00000000..6094f848 Binary files /dev/null and b/fr/.gitbook/assets/mas05.png differ diff --git a/fr/.gitbook/assets/mas06.png b/fr/.gitbook/assets/mas06.png new file mode 100644 index 00000000..eeb86668 Binary files /dev/null and b/fr/.gitbook/assets/mas06.png differ diff --git a/fr/.gitbook/assets/mas07.png b/fr/.gitbook/assets/mas07.png new file mode 100644 index 00000000..697eb048 Binary files /dev/null and b/fr/.gitbook/assets/mas07.png differ diff --git a/fr/.gitbook/assets/mas08.png b/fr/.gitbook/assets/mas08.png new file mode 100644 index 00000000..1fee1997 Binary files /dev/null and b/fr/.gitbook/assets/mas08.png differ diff --git a/fr/.gitbook/assets/mem0/api-key.png b/fr/.gitbook/assets/mem0/api-key.png new file mode 100644 index 00000000..06b900ce Binary files /dev/null and b/fr/.gitbook/assets/mem0/api-key.png differ diff --git a/fr/.gitbook/assets/mem0/creds.png b/fr/.gitbook/assets/mem0/creds.png new file mode 100644 index 00000000..2ce5ad64 Binary files /dev/null and b/fr/.gitbook/assets/mem0/creds.png differ diff --git 
a/fr/.gitbook/assets/mem0/flowise-chat-1.png b/fr/.gitbook/assets/mem0/flowise-chat-1.png new file mode 100644 index 00000000..9cb2b0a4 Binary files /dev/null and b/fr/.gitbook/assets/mem0/flowise-chat-1.png differ diff --git a/fr/.gitbook/assets/mem0/flowise-chat-2.png b/fr/.gitbook/assets/mem0/flowise-chat-2.png new file mode 100644 index 00000000..01ffac8f Binary files /dev/null and b/fr/.gitbook/assets/mem0/flowise-chat-2.png differ diff --git a/fr/.gitbook/assets/mem0/flowise-flow.png b/fr/.gitbook/assets/mem0/flowise-flow.png new file mode 100644 index 00000000..3a452b25 Binary files /dev/null and b/fr/.gitbook/assets/mem0/flowise-flow.png differ diff --git a/fr/.gitbook/assets/mem0/flowise-memory.png b/fr/.gitbook/assets/mem0/flowise-memory.png new file mode 100644 index 00000000..621066ca Binary files /dev/null and b/fr/.gitbook/assets/mem0/flowise-memory.png differ diff --git a/fr/.gitbook/assets/mem0/mem0-settings.png b/fr/.gitbook/assets/mem0/mem0-settings.png new file mode 100644 index 00000000..3fc16a87 Binary files /dev/null and b/fr/.gitbook/assets/mem0/mem0-settings.png differ diff --git a/fr/.gitbook/assets/mem0/settings.png b/fr/.gitbook/assets/mem0/settings.png new file mode 100644 index 00000000..f7200f13 Binary files /dev/null and b/fr/.gitbook/assets/mem0/settings.png differ diff --git a/fr/.gitbook/assets/migration-guide/cloud-migration/3.png b/fr/.gitbook/assets/migration-guide/cloud-migration/3.png new file mode 100644 index 00000000..451ad970 Binary files /dev/null and b/fr/.gitbook/assets/migration-guide/cloud-migration/3.png differ diff --git a/fr/.gitbook/assets/mistral_ai/1.png b/fr/.gitbook/assets/mistral_ai/1.png new file mode 100644 index 00000000..0f7052a3 Binary files /dev/null and b/fr/.gitbook/assets/mistral_ai/1.png differ diff --git a/fr/.gitbook/assets/mistral_ai/2.png b/fr/.gitbook/assets/mistral_ai/2.png new file mode 100644 index 00000000..da08971f Binary files /dev/null and b/fr/.gitbook/assets/mistral_ai/2.png differ 
diff --git a/fr/.gitbook/assets/mistral_ai/3.png b/fr/.gitbook/assets/mistral_ai/3.png new file mode 100644 index 00000000..cab2f1ff Binary files /dev/null and b/fr/.gitbook/assets/mistral_ai/3.png differ diff --git a/fr/.gitbook/assets/mistral_ai/4.png b/fr/.gitbook/assets/mistral_ai/4.png new file mode 100644 index 00000000..64606413 Binary files /dev/null and b/fr/.gitbook/assets/mistral_ai/4.png differ diff --git a/fr/.gitbook/assets/multi-agent-diagram.svg b/fr/.gitbook/assets/multi-agent-diagram.svg new file mode 100644 index 00000000..07de3d51 --- /dev/null +++ b/fr/.gitbook/assets/multi-agent-diagram.svg @@ -0,0 +1,13 @@ + + + + + + + + Worker 1Worker 2Worker 3SupervisorUser \ No newline at end of file diff --git a/fr/.gitbook/assets/multi-docs-apple.png b/fr/.gitbook/assets/multi-docs-apple.png new file mode 100644 index 00000000..16761aa6 Binary files /dev/null and b/fr/.gitbook/assets/multi-docs-apple.png differ diff --git a/fr/.gitbook/assets/multi-docs-console.png b/fr/.gitbook/assets/multi-docs-console.png new file mode 100644 index 00000000..f7182f9e Binary files /dev/null and b/fr/.gitbook/assets/multi-docs-console.png differ diff --git a/fr/.gitbook/assets/multi-docs-tesla.png b/fr/.gitbook/assets/multi-docs-tesla.png new file mode 100644 index 00000000..9a639ff1 Binary files /dev/null and b/fr/.gitbook/assets/multi-docs-tesla.png differ diff --git a/fr/.gitbook/assets/multi-docs-upload.png b/fr/.gitbook/assets/multi-docs-upload.png new file mode 100644 index 00000000..b24c3b67 Binary files /dev/null and b/fr/.gitbook/assets/multi-docs-upload.png differ diff --git a/fr/.gitbook/assets/multi-docs-upsert.png b/fr/.gitbook/assets/multi-docs-upsert.png new file mode 100644 index 00000000..e0b3b679 Binary files /dev/null and b/fr/.gitbook/assets/multi-docs-upsert.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-container-exists.png b/fr/.gitbook/assets/nvidia-nim-container-exists.png new file mode 100644 index 00000000..ac7008cd Binary files 
/dev/null and b/fr/.gitbook/assets/nvidia-nim-container-exists.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-1.png b/fr/.gitbook/assets/nvidia-nim-local-1.png new file mode 100644 index 00000000..456a272b Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-1.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-2.png b/fr/.gitbook/assets/nvidia-nim-local-2.png new file mode 100644 index 00000000..9b8b696f Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-2.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-3.png b/fr/.gitbook/assets/nvidia-nim-local-3.png new file mode 100644 index 00000000..941dc67d Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-3.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-4.png b/fr/.gitbook/assets/nvidia-nim-local-4.png new file mode 100644 index 00000000..f1de04d9 Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-4.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-5.png b/fr/.gitbook/assets/nvidia-nim-local-5.png new file mode 100644 index 00000000..be5dd548 Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-5.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-6.png b/fr/.gitbook/assets/nvidia-nim-local-6.png new file mode 100644 index 00000000..1e736e31 Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-6.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-7.png b/fr/.gitbook/assets/nvidia-nim-local-7.png new file mode 100644 index 00000000..b58fa30e Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-7.png differ diff --git a/fr/.gitbook/assets/nvidia-nim-local-8.png b/fr/.gitbook/assets/nvidia-nim-local-8.png new file mode 100644 index 00000000..0d835c5e Binary files /dev/null and b/fr/.gitbook/assets/nvidia-nim-local-8.png differ diff --git a/fr/.gitbook/assets/openai_openapi.yaml b/fr/.gitbook/assets/openai_openapi.yaml new file mode 100644 index 00000000..6d77f43c 
--- /dev/null +++ b/fr/.gitbook/assets/openai_openapi.yaml @@ -0,0 +1,3196 @@ +openapi: 3.0.0 +info: + title: OpenAI API + description: APIs for sampling from and fine-tuning language models + version: '1.1.0' +servers: + - url: https://api.openai.com/v1 +tags: +- name: OpenAI + description: The OpenAI REST API +paths: + /engines: + get: + operationId: listEngines + deprecated: true + tags: + - OpenAI + summary: Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListEnginesResponse' + x-oaiMeta: + name: List engines + group: engines + path: list + examples: + curl: | + curl https://api.openai.com/v1/engines \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Engine.list() + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.listEngines(); + response: | + { + "data": [ + { + "id": "engine-id-0", + "object": "engine", + "owner": "organization-owner", + "ready": true + }, + { + "id": "engine-id-2", + "object": "engine", + "owner": "organization-owner", + "ready": true + }, + { + "id": "engine-id-3", + "object": "engine", + "owner": "openai", + "ready": false + }, + ], + "object": "list" + } + + /engines/{engine_id}: + get: + operationId: retrieveEngine + deprecated: true + tags: + - OpenAI + summary: Retrieves a model instance, providing basic information about it such as the owner and availability. 
+ parameters: + - in: path + name: engine_id + required: true + schema: + type: string + # ideally this will be an actual ID, so this will always work from browser + example: + davinci + description: &engine_id_description > + The ID of the engine to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/Engine' + x-oaiMeta: + name: Retrieve engine + group: engines + path: retrieve + examples: + curl: | + curl https://api.openai.com/v1/engines/VAR_model_id \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Engine.retrieve("VAR_model_id") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.retrieveEngine("VAR_model_id"); + response: | + { + "id": "VAR_model_id", + "object": "engine", + "owner": "openai", + "ready": true + } + + /completions: + post: + operationId: createCompletion + tags: + - OpenAI + summary: Creates a completion for the provided prompt and parameters + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCompletionRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCompletionResponse' + x-oaiMeta: + name: Create completion + group: completions + path: create + examples: + curl: | + curl https://api.openai.com/v1/completions \ + -H 'Content-Type: application/json' \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0 + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Completion.create( + model="VAR_model_id", + prompt="Say this is 
a test", + max_tokens=7, + temperature=0 + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createCompletion({ + model: "VAR_model_id", + prompt: "Say this is a test", + max_tokens: 7, + temperature: 0, + }); + parameters: | + { + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0, + "top_p": 1, + "n": 1, + "stream": false, + "logprobs": null, + "stop": "\n" + } + response: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "VAR_model_id", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + /edits: + post: + operationId: createEdit + tags: + - OpenAI + summary: Creates a new edit for the provided input, instruction, and parameters + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEditRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEditResponse' + x-oaiMeta: + name: Create edit + group: edits + path: create + examples: + curl: | + curl https://api.openai.com/v1/edits \ + -H 'Content-Type: application/json' \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -d '{ + "model": "VAR_model_id", + "input": "What day of the wek is it?", + "instruction": "Fix the spelling mistakes" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Edit.create( + model="VAR_model_id", + input="What day of the wek is it?", + instruction="Fix the spelling mistakes" + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const 
configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createEdit({ + model: "VAR_model_id", + input: "What day of the wek is it?", + instruction: "Fix the spelling mistakes", + }); + parameters: | + { + "model": "VAR_model_id", + "input": "What day of the wek is it?", + "instruction": "Fix the spelling mistakes", + } + response: | + { + "object": "edit", + "created": 1589478378, + "choices": [ + { + "text": "What day of the week is it?", + "index": 0, + } + ], + "usage": { + "prompt_tokens": 25, + "completion_tokens": 32, + "total_tokens": 57 + } + } + + /images/generations: + post: + operationId: createImage + tags: + - OpenAI + summary: Creates an image given a prompt. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateImageRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image + group: images + path: create + examples: + curl: | + curl https://api.openai.com/v1/images/generations \ + -H 'Content-Type: application/json' \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -d '{ + "prompt": "A cute baby sea otter", + "n": 2, + "size": "1024x1024" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create( + prompt="A cute baby sea otter", + n=2, + size="1024x1024" + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createImage({ + prompt: "A cute baby sea otter", + n: 2, + size: "1024x1024", + }); + parameters: | + { + "prompt": "A cute baby sea otter", + "n": 2, + "size": "1024x1024" + } + response: | + { + "created": 1589478378, + "data": [ + { + "url": 
"https://..." + }, + { + "url": "https://..." + } + ] + } + + /images/edits: + post: + operationId: createImageEdit + tags: + - OpenAI + summary: Creates an edited or extended image given an original image and a prompt. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageEditRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image edit + group: images + path: create-edit + examples: + curl: | + curl https://api.openai.com/v1/images/edits \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -F image='@otter.png' \ + -F mask='@mask.png' \ + -F prompt="A cute baby sea otter wearing a beret" \ + -F n=2 \ + -F size="1024x1024" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create_edit( + image=open("otter.png", "rb"), + mask=open("mask.png", "rb"), + prompt="A cute baby sea otter wearing a beret", + n=2, + size="1024x1024" + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createImageEdit( + fs.createReadStream("otter.png"), + fs.createReadStream("mask.png"), + "A cute baby sea otter wearing a beret", + 2, + "1024x1024" + ); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + + /images/variations: + post: + operationId: createImageVariation + tags: + - OpenAI + summary: Creates a variation of a given image. 
+ requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageVariationRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image variation + group: images + path: create-variation + examples: + curl: | + curl https://api.openai.com/v1/images/variations \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -F image='@otter.png' \ + -F n=2 \ + -F size="1024x1024" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create_variation( + image=open("otter.png", "rb"), + n=2, + size="1024x1024" + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createImageVariation( + fs.createReadStream("otter.png"), + 2, + "1024x1024" + ); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + + /embeddings: + post: + operationId: createEmbedding + tags: + - OpenAI + summary: Creates an embedding vector representing the input text. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingResponse' + x-oaiMeta: + name: Create embeddings + group: embeddings + path: create + examples: + curl: | + curl https://api.openai.com/v1/embeddings \ + -X POST \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{"input": "The food was delicious and the waiter...", + "model": "text-embedding-ada-002"}' + + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Embedding.create( + model="text-embedding-ada-002", + input="The food was delicious and the waiter..." + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createEmbedding({ + model: "text-embedding-ada-002", + input: "The food was delicious and the waiter...", + }); + parameters: | + { + "model": "text-embedding-ada-002", + "input": "The food was delicious and the waiter..." + } + response: | + { + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1056 floats total for ada) + -0.0028842222, + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + + /engines/{engine_id}/search: + post: + operationId: createSearch + deprecated: true + tags: + - OpenAI + summary: | + The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. + + To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. 
When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. + + The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. + parameters: + - in: path + name: engine_id + required: true + schema: + type: string + example: davinci + description: The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateSearchRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateSearchResponse' + x-oaiMeta: + name: Create search + group: searches + path: create + examples: + curl: | + curl https://api.openai.com/v1/engines/davinci/search \ + -H "Content-Type: application/json" \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -d '{ + "documents": ["White House", "hospital", "school"], + "query": "the president" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Engine("davinci").search( + documents=["White House", "hospital", "school"], + query="the president" + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createSearch("davinci", { + documents: ["White House", "hospital", "school"], + query: "the president", + }); + parameters: | + { + "documents": [ + "White House", + "hospital", + "school" + ], + "query": "the president" + } + response: | + { + "data": [ + { + "document": 0, + "object": "search_result", + "score": 215.412 + }, + { + 
"document": 1, + "object": "search_result", + "score": 40.316 + }, + { + "document": 2, + "object": "search_result", + "score": 55.226 + } + ], + "object": "list" + } + + /files: + get: + operationId: listFiles + tags: + - OpenAI + summary: Returns a list of files that belong to the user's organization. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFilesResponse' + x-oaiMeta: + name: List files + group: files + path: list + examples: + curl: | + curl https://api.openai.com/v1/files \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.list() + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.listFiles(); + response: | + { + "data": [ + { + "id": "file-ccdDZrC3iZVNiQVeEA6Z66wf", + "object": "file", + "bytes": 175, + "created_at": 1613677385, + "filename": "train.jsonl", + "purpose": "search" + }, + { + "id": "file-XjGxS3KTG0uNmNOK362iJua3", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "puppy.jsonl", + "purpose": "search" + } + ], + "object": "list" + } + post: + operationId: createFile + tags: + - OpenAI + summary: | + Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
+ + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateFileRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + x-oaiMeta: + name: Upload file + group: files + path: upload + examples: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -F purpose="fine-tune" \ + -F file='@mydata.jsonl' + + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.create( + file=open("mydata.jsonl", "rb"), + purpose='fine-tune' + ) + node.js: | + const fs = require("fs"); + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createFile( + fs.createReadStream("mydata.jsonl"), + "fine-tune" + ); + response: | + { + "id": "file-XjGxS3KTG0uNmNOK362iJua3", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "mydata.jsonl", + "purpose": "fine-tune" + } + + /files/{file_id}: + delete: + operationId: deleteFile + tags: + - OpenAI + summary: Delete a file. 
+ parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteFileResponse' + x-oaiMeta: + name: Delete file + group: files + path: delete + examples: + curl: | + curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \ + -X DELETE \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.delete("file-XjGxS3KTG0uNmNOK362iJua3") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.deleteFile("file-XjGxS3KTG0uNmNOK362iJua3"); + response: | + { + "id": "file-XjGxS3KTG0uNmNOK362iJua3", + "object": "file", + "deleted": true + } + get: + operationId: retrieveFile + tags: + - OpenAI + summary: Returns information about a specific file. 
+ parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + x-oaiMeta: + name: Retrieve file + group: files + path: retrieve + examples: + curl: | + curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3 \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.retrieve("file-XjGxS3KTG0uNmNOK362iJua3") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.retrieveFile("file-XjGxS3KTG0uNmNOK362iJua3"); + response: | + { + "id": "file-XjGxS3KTG0uNmNOK362iJua3", + "object": "file", + "bytes": 140, + "created_at": 1613779657, + "filename": "mydata.jsonl", + "purpose": "fine-tune" + } + + /files/{file_id}/content: + get: + operationId: downloadFile + tags: + - OpenAI + summary: Returns the contents of the specified file + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + type: string + x-oaiMeta: + name: Retrieve file content + group: files + path: retrieve-content + examples: + curl: | + curl https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3/content \ + -H 'Authorization: Bearer YOUR_API_KEY' > file.jsonl + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + content = openai.File.download("file-XjGxS3KTG0uNmNOK362iJua3") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, 
+ }); + const openai = new OpenAIApi(configuration); + const response = await openai.downloadFile("file-XjGxS3KTG0uNmNOK362iJua3"); + + /answers: + post: + operationId: createAnswer + deprecated: true + tags: + - OpenAI + summary: | + Answers the specified question using the provided documents and examples. + + The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAnswerRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAnswerResponse' + x-oaiMeta: + name: Create answer + group: answers + path: create + examples: + curl: | + curl https://api.openai.com/v1/answers \ + -X POST \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H 'Content-Type: application/json' \ + -d '{ + "documents": ["Puppy A is happy.", "Puppy B is sad."], + "question": "which puppy is happy?", + "search_model": "ada", + "model": "curie", + "examples_context": "In 2017, U.S. life expectancy was 78.6 years.", + "examples": [["What is human life expectancy in the United States?","78 years."]], + "max_tokens": 5, + "stop": ["\n", "<|endoftext|>"] + }' + + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Answer.create( + search_model="ada", + model="curie", + question="which puppy is happy?", + documents=["Puppy A is happy.", "Puppy B is sad."], + examples_context="In 2017, U.S. 
life expectancy was 78.6 years.", + examples=[["What is human life expectancy in the United States?","78 years."]], + max_tokens=5, + stop=["\n", "<|endoftext|>"], + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createAnswer({ + search_model: "ada", + model: "curie", + question: "which puppy is happy?", + documents: ["Puppy A is happy.", "Puppy B is sad."], + examples_context: "In 2017, U.S. life expectancy was 78.6 years.", + examples: [["What is human life expectancy in the United States?","78 years."]], + max_tokens: 5, + stop: ["\n", "<|endoftext|>"], + }); + parameters: | + { + "documents": ["Puppy A is happy.", "Puppy B is sad."], + "question": "which puppy is happy?", + "search_model": "ada", + "model": "curie", + "examples_context": "In 2017, U.S. life expectancy was 78.6 years.", + "examples": [["What is human life expectancy in the United States?","78 years."]], + "max_tokens": 5, + "stop": ["\n", "<|endoftext|>"] + } + response: | + { + "answers": [ + "puppy A." + ], + "completion": "cmpl-2euVa1kmKUuLpSX600M41125Mo9NI", + "model": "curie:2020-05-03", + "object": "answer", + "search_model": "ada", + "selected_documents": [ + { + "document": 0, + "text": "Puppy A is happy. " + }, + { + "document": 1, + "text": "Puppy B is sad. " + } + ] + } + + /classifications: + post: + operationId: createClassification + deprecated: true + tags: + - OpenAI + summary: | + Classifies the specified `query` using provided examples. + + The endpoint first [searches](/docs/api-reference/searches) over the labeled examples + to select the ones most relevant for the particular query. Then, the relevant examples + are combined with the query to construct a prompt to produce the final label via the + [completions](/docs/api-reference/completions) endpoint. 
+ + Labeled examples can be provided via an uploaded `file`, or explicitly listed in the + request using the `examples` parameter for quick tests and small scale use cases. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateClassificationRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateClassificationResponse' + x-oaiMeta: + name: Create classification + group: classifications + path: create + examples: + curl: | + curl https://api.openai.com/v1/classifications \ + -X POST \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -H 'Content-Type: application/json' \ + -d '{ + "examples": [ + ["A happy moment", "Positive"], + ["I am sad.", "Negative"], + ["I am feeling awesome", "Positive"]], + "query": "It is a raining day :(", + "search_model": "ada", + "model": "curie", + "labels":["Positive", "Negative", "Neutral"] + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Classification.create( + search_model="ada", + model="curie", + examples=[ + ["A happy moment", "Positive"], + ["I am sad.", "Negative"], + ["I am feeling awesome", "Positive"] + ], + query="It is a raining day :(", + labels=["Positive", "Negative", "Neutral"], + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createClassification({ + search_model: "ada", + model: "curie", + examples: [ + ["A happy moment", "Positive"], + ["I am sad.", "Negative"], + ["I am feeling awesome", "Positive"] + ], + query:"It is a raining day :(", + labels: ["Positive", "Negative", "Neutral"], + }); + parameters: | + { + "examples": [ + ["A happy moment", "Positive"], + ["I am sad.", "Negative"], + ["I am feeling awesome", "Positive"] + ], + "labels": ["Positive", 
"Negative", "Neutral"], + "query": "It is a raining day :(", + "search_model": "ada", + "model": "curie" + } + response: | + { + "completion": "cmpl-2euN7lUVZ0d4RKbQqRV79IiiE6M1f", + "label": "Negative", + "model": "curie:2020-05-03", + "object": "classification", + "search_model": "ada", + "selected_examples": [ + { + "document": 1, + "label": "Negative", + "text": "I am sad." + }, + { + "document": 0, + "label": "Positive", + "text": "A happy moment" + }, + { + "document": 2, + "label": "Positive", + "text": "I am feeling awesome" + } + ] + } + + /fine-tunes: + post: + operationId: createFineTune + tags: + - OpenAI + summary: | + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about Fine-tuning](/docs/guides/fine-tuning) + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateFineTuneRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + x-oaiMeta: + name: Create fine-tune + group: fine-tunes + path: create + beta: true + examples: + curl: | + curl https://api.openai.com/v1/fine-tunes \ + -X POST \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer YOUR_API_KEY" \ + -d '{ + "training_file": "file-XGinujblHPwGLSztz8cPS8XY" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.create(training_file="file-XGinujblHPwGLSztz8cPS8XY") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createFineTune({ + training_file: "file-XGinujblHPwGLSztz8cPS8XY", + }); + response: | + { + "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "object": 
"fine-tune", + "model": "curie", + "created_at": 1614807352, + "events": [ + { + "object": "fine-tune-event", + "created_at": 1614807352, + "level": "info", + "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." + } + ], + "fine_tuned_model": null, + "hyperparams": { + "batch_size": 4, + "learning_rate_multiplier": 0.1, + "n_epochs": 4, + "prompt_loss_weight": 0.1, + }, + "organization_id": "org-...", + "result_files": [], + "status": "pending", + "validation_files": [], + "training_files": [ + { + "id": "file-XGinujblHPwGLSztz8cPS8XY", + "object": "file", + "bytes": 1547276, + "created_at": 1610062281, + "filename": "my-data-train.jsonl", + "purpose": "fine-tune-train" + } + ], + "updated_at": 1614807352, + } + get: + operationId: listFineTunes + tags: + - OpenAI + summary: | + List your organization's fine-tuning jobs + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTunesResponse' + x-oaiMeta: + name: List fine-tunes + group: fine-tunes + path: list + beta: true + examples: + curl: | + curl https://api.openai.com/v1/fine-tunes \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.list() + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.listFineTunes(); + response: | + { + "object": "list", + "data": [ + { + "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807352, + "fine_tuned_model": null, + "hyperparams": { ... }, + "organization_id": "org-...", + "result_files": [], + "status": "pending", + "validation_files": [], + "training_files": [ { ... } ], + "updated_at": 1614807352, + }, + { ... }, + { ... 
} + ] + } + + /fine-tunes/{fine_tune_id}: + get: + operationId: retrieveFineTune + tags: + - OpenAI + summary: | + Gets info about the fine-tune job. + + [Learn more about Fine-tuning](/docs/guides/fine-tuning) + parameters: + - in: path + name: fine_tune_id + required: true + schema: + type: string + example: + ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tune job + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + x-oaiMeta: + name: Retrieve fine-tune + group: fine-tunes + path: retrieve + beta: true + examples: + curl: | + curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + -H "Authorization: Bearer YOUR_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.retrieve(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.retrieveFineTune("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + response: | + { + "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807352, + "events": [ + { + "object": "fine-tune-event", + "created_at": 1614807352, + "level": "info", + "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." + }, + { + "object": "fine-tune-event", + "created_at": 1614807356, + "level": "info", + "message": "Job started." + }, + { + "object": "fine-tune-event", + "created_at": 1614807861, + "level": "info", + "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." 
+ }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Job succeeded." + } + ], + "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", + "hyperparams": { + "batch_size": 4, + "learning_rate_multiplier": 0.1, + "n_epochs": 4, + "prompt_loss_weight": 0.1, + }, + "organization_id": "org-...", + "result_files": [ + { + "id": "file-QQm6ZpqdNwAaVC3aSz5sWwLT", + "object": "file", + "bytes": 81509, + "created_at": 1614807863, + "filename": "compiled_results.csv", + "purpose": "fine-tune-results" + } + ], + "status": "succeeded", + "validation_files": [], + "training_files": [ + { + "id": "file-XGinujblHPwGLSztz8cPS8XY", + "object": "file", + "bytes": 1547276, + "created_at": 1610062281, + "filename": "my-data-train.jsonl", + "purpose": "fine-tune-train" + } + ], + "updated_at": 1614807865, + } + + /fine-tunes/{fine_tune_id}/cancel: + post: + operationId: cancelFineTune + tags: + - OpenAI + summary: | + Immediately cancel a fine-tune job. + parameters: + - in: path + name: fine_tune_id + required: true + schema: + type: string + example: + ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tune job to cancel + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + x-oaiMeta: + name: Cancel fine-tune + group: fine-tunes + path: cancel + beta: true + examples: + curl: | + curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ + -X POST \ + -H "Authorization: Bearer YOUR_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.cancel(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.cancelFineTune("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + response: | + { + 
"id": "ft-xhrpBbvVUzYGo8oUO1FY4nI7", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807770, + "events": [ { ... } ], + "fine_tuned_model": null, + "hyperparams": { ... }, + "organization_id": "org-...", + "result_files": [], + "status": "cancelled", + "validation_files": [], + "training_files": [ + { + "id": "file-XGinujblHPwGLSztz8cPS8XY", + "object": "file", + "bytes": 1547276, + "created_at": 1610062281, + "filename": "my-data-train.jsonl", + "purpose": "fine-tune-train" + } + ], + "updated_at": 1614807789, + } + + /fine-tunes/{fine_tune_id}/events: + get: + operationId: listFineTuneEvents + tags: + - OpenAI + summary: | + Get fine-grained status updates for a fine-tune job. + parameters: + - in: path + name: fine_tune_id + required: true + schema: + type: string + example: + ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tune job to get events for. + - in: query + name: stream + required: false + schema: + type: boolean + default: false + description: | + Whether to stream events for the fine-tune job. If set to true, + events will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available. The stream will terminate with a + `data: [DONE]` message when the job is finished (succeeded, cancelled, + or failed). + + If set to false, only events generated so far will be returned. 
+ responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTuneEventsResponse' + x-oaiMeta: + name: List fine-tune events + group: fine-tunes + path: events + beta: true + examples: + curl: | + curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ + -H "Authorization: Bearer YOUR_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.list_events(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.listFineTuneEvents("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + response: | + { + "object": "list", + "data": [ + { + "object": "fine-tune-event", + "created_at": 1614807352, + "level": "info", + "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." + }, + { + "object": "fine-tune-event", + "created_at": 1614807356, + "level": "info", + "message": "Job started." + }, + { + "object": "fine-tune-event", + "created_at": 1614807861, + "level": "info", + "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT." + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Job succeeded." + } + ] + } + + /models: + get: + operationId: listModels + tags: + - OpenAI + summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. 
+ responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListModelsResponse' + x-oaiMeta: + name: List models + group: models + path: list + examples: + curl: | + curl https://api.openai.com/v1/models \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.list() + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.listModels(); + response: | + { + "data": [ + { + "id": "model-id-0", + "object": "model", + "owned_by": "organization-owner", + "permission": [...] + }, + { + "id": "model-id-1", + "object": "model", + "owned_by": "organization-owner", + "permission": [...] + }, + { + "id": "model-id-2", + "object": "model", + "owned_by": "openai", + "permission": [...] + }, + ], + "object": "list" + } + + /models/{model}: + get: + operationId: retrieveModel + tags: + - OpenAI + summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
+ parameters: + - in: path + name: model + required: true + schema: + type: string + # ideally this will be an actual ID, so this will always work from browser + example: + text-davinci-001 + description: + The ID of the model to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/Model' + x-oaiMeta: + name: Retrieve model + group: models + path: retrieve + examples: + curl: | + curl https://api.openai.com/v1/models/VAR_model_id \ + -H 'Authorization: Bearer YOUR_API_KEY' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.retrieve("VAR_model_id") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.retrieveModel("VAR_model_id"); + response: | + { + "id": "VAR_model_id", + "object": "model", + "owned_by": "openai", + "permission": [...] + } + delete: + operationId: deleteModel + tags: + - OpenAI + summary: Delete a fine-tuned model. You must have the Owner role in your organization. 
+ parameters: + - in: path + name: model + required: true + schema: + type: string + example: curie:ft-acmeco-2021-03-03-21-44-20 + description: The model to delete + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteModelResponse' + x-oaiMeta: + name: Delete fine-tune model + group: fine-tunes + path: delete-model + beta: true + examples: + curl: | + curl https://api.openai.com/v1/models/curie:ft-acmeco-2021-03-03-21-44-20 \ + -X DELETE \ + -H "Authorization: Bearer YOUR_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.delete("curie:ft-acmeco-2021-03-03-21-44-20") + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.deleteModel('curie:ft-acmeco-2021-03-03-21-44-20'); + response: | + { + "id": "curie:ft-acmeco-2021-03-03-21-44-20", + "object": "model", + "deleted": true + } + + /moderations: + post: + operationId: createModeration + tags: + - OpenAI + summary: Classifies if text violates OpenAI's Content Policy + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateModerationRequest' + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateModerationResponse' + x-oaiMeta: + name: Create moderation + group: moderations + path: create + examples: + curl: | + curl https://api.openai.com/v1/moderations \ + -H 'Content-Type: application/json' \ + -H 'Authorization: Bearer YOUR_API_KEY' \ + -d '{ + "input": "I want to kill them." 
+ }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Moderation.create( + input="I want to kill them.", + ) + node.js: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const response = await openai.createModeration({ + input: "I want to kill them.", + }); + parameters: | + { + "input": "I want to kill them." + } + response: | + { + "id": "modr-5MWoLO", + "model": "text-moderation-001", + "results": [ + { + "categories": { + "hate": false, + "hate/threatening": true, + "self-harm": false, + "sexual": false, + "sexual/minors": false, + "violence": true, + "violence/graphic": false + }, + "category_scores": { + "hate": 0.22714105248451233, + "hate/threatening": 0.4132447838783264, + "self-harm": 0.005232391878962517, + "sexual": 0.01407341007143259, + "sexual/minors": 0.0038522258400917053, + "violence": 0.9223177433013916, + "violence/graphic": 0.036865197122097015 + }, + "flagged": true + } + ] + } + +components: + schemas: + ListEnginesResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/Engine' + required: + - object + - data + + ListModelsResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/Model' + required: + - object + - data + + DeleteModelResponse: + type: object + properties: + id: + type: string + object: + type: string + deleted: + type: boolean + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: &model_configuration + description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 
+ type: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: '<|endoftext|>' + nullable: true + oneOf: + - type: string + default: '' + example: "This is a test." + - type: array + items: + type: string + default: '' + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + suffix: + description: + The suffix that comes after a completion of inserted text. + default: null + nullable: true + type: string + example: "test." + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. + + We generally recommend altering this or `top_p` but not both. 
+ top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + type: boolean + nullable: true + default: false + logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. 
+ echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ required: + - model + + CreateCompletionResponse: + type: object + properties: + id: + type: string + object: + type: string + created: + type: integer + model: + type: string + choices: + type: array + items: + type: object + properties: + text: + type: string + index: + type: integer + logprobs: + type: object + nullable: true + properties: + tokens: + type: array + items: + type: string + token_logprobs: + type: array + items: + type: number + top_logprobs: + type: array + items: + type: object + text_offset: + type: array + items: + type: integer + finish_reason: + type: string + usage: + type: object + properties: + prompt_tokens: + type: integer + completion_tokens: + type: integer + total_tokens: + type: integer + required: + - prompt_tokens + - completion_tokens + - total_tokens + required: + - id + - object + - created + - model + - choices + + CreateEditRequest: + type: object + properties: + model: *model_configuration + input: + description: + The input text to use as a starting point for the edit. + type: string + default: '' + nullable: true + example: "What day of the wek is it?" + instruction: + description: + The instruction that tells the model how to edit the prompt. + type: string + example: "Fix the spelling mistakes." + n: + type: integer + minimum: 1 + maximum: 20 + default: 1 + example: 1 + nullable: true + description: + How many edits to generate for the input and instruction. 
+ temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + required: + - model + - instruction + + CreateEditResponse: + type: object + properties: + object: + type: string + created: + type: integer + choices: + type: array + items: + type: object + properties: + text: + type: string + index: + type: integer + logprobs: + type: object + nullable: true + properties: + tokens: + type: array + items: + type: string + token_logprobs: + type: array + items: + type: number + top_logprobs: + type: array + items: + type: object + text_offset: + type: array + items: + type: integer + finish_reason: + type: string + usage: + type: object + properties: + prompt_tokens: + type: integer + completion_tokens: + type: integer + total_tokens: + type: integer + required: + - prompt_tokens + - completion_tokens + - total_tokens + required: + - object + - created + - choices + - usage + + CreateImageRequest: + type: object + properties: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter" + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: &images_size + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: &images_response_format + type: string + enum: ["url", "b64_json"] + default: "url" + example: "url" + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
+ user: *end_user_param_configuration + required: + - prompt + + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + type: object + properties: + url: + type: string + b64_json: + type: string + required: + - created + - data + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + type: string + format: binary + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + n: *images_n + size: *images_size + response_format: *images_response_format + user: *end_user_param_configuration + required: + - prompt + - image + + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary + n: *images_n + size: *images_size + response_format: *images_response_format + user: *end_user_param_configuration + required: + - image + + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify + oneOf: + - type: string + default: '' + example: "I want to kill them." + - type: array + items: + type: string + default: '' + example: "I want to kill them." + model: + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. 
This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + type: string + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + required: + - input + + CreateModerationResponse: + type: object + properties: + id: + type: string + model: + type: string + results: + type: array + items: + type: object + properties: + flagged: + type: boolean + categories: + type: object + properties: + hate: + type: boolean + hate/threatening: + type: boolean + self-harm: + type: boolean + sexual: + type: boolean + sexual/minors: + type: boolean + violence: + type: boolean + violence/graphic: + type: boolean + required: + - hate + - hate/threatening + - self-harm + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + properties: + hate: + type: number + hate/threatening: + type: number + self-harm: + type: number + sexual: + type: number + sexual/minors: + type: number + violence: + type: number + violence/graphic: + type: number + required: + - hate + - hate/threatening + - self-harm + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + + CreateSearchRequest: + type: object + properties: + query: + description: Query to search against the documents. + type: string + example: "the president" + minLength: 1 + documents: + description: | + Up to 200 documents to search over, provided as a list of strings. + + The maximum document length (in tokens) is 2034 minus the number of tokens in the query. + + You should specify either `documents` or a `file`, but not both. 
+ type: array + minItems: 1 + maxItems: 200 + items: + type: string + nullable: true + example: "['White House', 'hospital', 'school']" + file: + description: | + The ID of an uploaded file that contains documents to search over. + + You should specify either `documents` or a `file`, but not both. + type: string + nullable: true + max_rerank: + description: | + The maximum number of documents to be re-ranked and returned by search. + + This flag only takes effect when `file` is set. + type: integer + minimum: 1 + default: 200 + nullable: true + return_metadata: &return_metadata_configuration + description: | + A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. + + This flag only takes effect when `file` is set. + type: boolean + default: false + nullable: true + user: *end_user_param_configuration + required: + - query + + CreateSearchResponse: + type: object + properties: + object: + type: string + model: + type: string + data: + type: array + items: + type: object + properties: + object: + type: string + document: + type: integer + score: + type: number + + ListFilesResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + required: + - object + - data + + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + + If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). + type: string + format: binary + purpose: + description: | + The intended purpose of the uploaded documents. + + Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. 
+ + type: string + required: + - file + - purpose + + DeleteFileResponse: + type: object + properties: + id: + type: string + object: + type: string + deleted: + type: boolean + required: + - id + - object + - deleted + + CreateAnswerRequest: + type: object + additionalProperties: false + properties: + model: + description: ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. + type: string + question: + description: Question to get answered. + type: string + minLength: 1 + example: "What is the capital of Japan?" + examples: + description: List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. + type: array + minItems: 1 + maxItems: 200 + items: + type: array + minItems: 2 + maxItems: 2 + items: + type: string + minLength: 1 + example: "[['What is the capital of Canada?', 'Ottawa'], ['Which province is Ottawa in?', 'Ontario']]" + examples_context: + description: A text snippet containing the contextual information used to generate the answers for the `examples` you provide. + type: string + example: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border." + documents: + description: | + List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. + + You should specify either `documents` or a `file`, but not both. + type: array + maxItems: 200 + items: + type: string + example: "['Japan is an island country in East Asia, located in the northwest Pacific Ocean.', 'Tokyo is the capital and most populous prefecture of Japan.']" + nullable: true + file: + description: | + The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. 
+
+            You should specify either `documents` or a `file`, but not both.
+          type: string
+          nullable: true
+        search_model: &search_model_configuration
+          description: ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
+          type: string
+          default: ada
+          nullable: true
+        max_rerank:
+          description: The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
+          type: integer
+          default: 200
+          nullable: true
+        temperature:
+          description: What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks and value 0 (argmax sampling) works better for scenarios with a well-defined answer.
+          type: number
+          default: 0
+          nullable: true
+        logprobs: &context_completions_logprobs_configuration
+          type: integer
+          minimum: 0
+          maximum: 5
+          default: null
+          nullable: true
+          description: |
+            Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+            The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.
+
+            When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
+ max_tokens: + description: The maximum number of tokens allowed for the generated answer + type: integer + default: 16 + nullable: true + stop: + description: *completions_stop_description + default: null + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + nullable: true + n: + description: How many answers to generate for each question. + type: integer + minimum: 1 + maximum: 10 + default: 1 + nullable: true + logit_bias: *completions_logit_bias + return_metadata: *return_metadata_configuration + return_prompt: &return_prompt_configuration + description: If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. + type: boolean + default: false + nullable: true + expand: &expand_configuration + description: If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. + type: array + items: {} + nullable: true + default: [] + user: *end_user_param_configuration + required: + - model + - question + - examples + - examples_context + + CreateAnswerResponse: + type: object + properties: + object: + type: string + model: + type: string + search_model: + type: string + completion: + type: string + answers: + type: array + items: + type: string + selected_documents: + type: array + items: + type: object + properties: + document: + type: integer + text: + type: string + + CreateClassificationRequest: + type: object + additionalProperties: false + properties: + model: *model_configuration + query: + description: Query to be classified. + type: string + minLength: 1 + example: "The plot is not very attractive." 
+ examples: + description: | + A list of examples with labels, in the following format: + + `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` + + All the label strings will be normalized to be capitalized. + + You should specify either `examples` or `file`, but not both. + type: array + minItems: 2 + maxItems: 200 + items: + type: array + minItems: 2 + maxItems: 2 + items: + type: string + minLength: 1 + example: "[['Do not see this film.', 'Negative'], ['Smart, provocative and blisteringly funny.', 'Positive']]" + nullable: true + file: + description: | + The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. + + You should specify either `examples` or `file`, but not both. + type: string + nullable: true + labels: + description: The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. + type: array + minItems: 2 + maxItems: 200 + default: null + items: + type: string + example: ["Positive", "Negative"] + nullable: true + search_model: *search_model_configuration + temperature: + description: + What sampling `temperature` to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. + type: number + minimum: 0 + maximum: 2 + default: 0 + nullable: true + example: 0 + logprobs: *context_completions_logprobs_configuration + max_examples: + description: The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. 
+ type: integer + default: 200 + nullable: true + logit_bias: *completions_logit_bias + return_prompt: *return_prompt_configuration + return_metadata: *return_metadata_configuration + expand: *expand_configuration + user: *end_user_param_configuration + required: + - model + - query + + CreateClassificationResponse: + type: object + properties: + object: + type: string + model: + type: string + search_model: + type: string + completion: + type: string + label: + type: string + selected_examples: + type: array + items: + type: object + properties: + document: + type: integer + text: + type: string + label: + type: string + + CreateFineTuneRequest: + type: object + properties: + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file, where each training + example is a JSON object with the keys "prompt" and "completion". + Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. + type: string + example: "file-ajSREls59WBbvgSzJSVWxMCB" + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + Your train and validation data should be mutually exclusive. + + Your dataset must be formatted as a JSONL file, where each validation + example is a JSON object with the keys "prompt" and "completion". + Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. 
+          type: string
+          nullable: true
+          example: "file-XjSREls59WBbvgSzJSVWxMCa"
+        model:
+          description: |
+            The name of the base model to fine-tune. You can select one of "ada",
+            "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
+            To learn more about these models, see the
+            [Models](https://beta.openai.com/docs/models) documentation.
+          default: "curie"
+          type: string
+          nullable: true
+        n_epochs:
+          description: |
+            The number of epochs to train the model for. An epoch refers to one
+            full cycle through the training dataset.
+          default: 4
+          type: integer
+          nullable: true
+        batch_size:
+          description: |
+            The batch size to use for training. The batch size is the number of
+            training examples used to train a single forward and backward pass.
+
+            By default, the batch size will be dynamically configured to be
+            ~0.2% of the number of examples in the training set, capped at 256 -
+            in general, we've found that larger batch sizes tend to work better
+            for larger datasets.
+          default: null
+          type: integer
+          nullable: true
+        learning_rate_multiplier:
+          description: |
+            The learning rate multiplier to use for training.
+            The fine-tuning learning rate is the original learning rate used for
+            pretraining multiplied by this value.
+
+            By default, the learning rate multiplier is 0.05, 0.1, or 0.2
+            depending on final `batch_size` (larger learning rates tend to
+            perform better with larger batch sizes). We recommend experimenting
+            with values in the range 0.02 to 0.2 to see what produces the best
+            results.
+          default: null
+          type: number
+          nullable: true
+        prompt_loss_weight:
+          description: |
+            The weight to use for loss on the prompt tokens. This controls how
+            much the model tries to learn to generate the prompt (as compared
+            to the completion which always has a weight of 1.0), and can add
+            a stabilizing effect to training when completions are short.
+ + If prompts are extremely long (relative to completions), it may make + sense to reduce this weight so as to avoid over-prioritizing + learning the prompt. + default: 0.01 + type: number + nullable: true + compute_classification_metrics: + description: | + If set, we calculate classification-specific metrics such as accuracy + and F-1 score using the validation set at the end of every epoch. + These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + + In order to compute classification metrics, you must provide a + `validation_file`. Additionally, you must + specify `classification_n_classes` for multiclass classification or + `classification_positive_class` for binary classification. + type: boolean + default: false + nullable: true + classification_n_classes: + description: | + The number of classes in a classification task. + + This parameter is required for multiclass classification. + type: integer + default: null + nullable: true + classification_positive_class: + description: | + The positive class in binary classification. + + This parameter is needed to generate precision, recall, and F1 + metrics when doing binary classification. + type: string + default: null + nullable: true + classification_betas: + description: | + If this is provided, we calculate F-beta scores at the specified + beta values. The F-beta score is a generalization of F-1 score. + This is only used for binary classification. + + With a beta of 1 (i.e. the F-1 score), precision and recall are + given the same weight. A larger beta score puts more weight on + recall and less on precision. A smaller beta score puts more weight + on precision and less on recall. + type: array + items: + type: number + example: [0.6, 1, 1.5, 2] + default: null + nullable: true + suffix: + description: | + A string of up to 40 characters that will be added to your fine-tuned model name. 
+ + For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + required: + - training_file + + ListFineTunesResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/FineTune' + required: + - object + - data + + ListFineTuneEventsResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/FineTuneEvent' + required: + - object + - data + + CreateEmbeddingRequest: + type: object + additionalProperties: false + properties: + model: *model_configuration + input: + description: | + Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + default: '' + example: "This is a test." + - type: array + items: + type: string + default: '' + example: "This is a test." 
+ - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + object: + type: string + model: + type: string + data: + type: array + items: + type: object + properties: + index: + type: integer + object: + type: string + embedding: + type: array + items: + type: number + required: + - index + - object + - embedding + usage: + type: object + properties: + prompt_tokens: + type: integer + total_tokens: + type: integer + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage + + Engine: + title: Engine + properties: + id: + type: string + object: + type: string + created: + type: integer + nullable: true + ready: + type: boolean + required: + - id + - object + - created + - ready + + Model: + title: Model + properties: + id: + type: string + object: + type: string + created: + type: integer + owned_by: + type: string + required: + - id + - object + - created + - owned_by + + OpenAIFile: + title: OpenAIFile + properties: + id: + type: string + object: + type: string + bytes: + type: integer + created_at: + type: integer + filename: + type: string + purpose: + type: string + status: + type: string + status_details: + type: object + nullable: true + required: + - id + - object + - bytes + - created_at + - filename + - purpose + + FineTune: + title: FineTune + properties: + id: + type: string + object: + type: string + created_at: + type: integer + updated_at: + type: integer + model: + type: string + fine_tuned_model: + type: string + nullable: true + organization_id: + type: string + status: + type: string + hyperparams: + type: object + training_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + validation_files: 
+ type: array + items: + $ref: '#/components/schemas/OpenAIFile' + result_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + events: + type: array + items: + $ref: '#/components/schemas/FineTuneEvent' + required: + - id + - object + - created_at + - updated_at + - model + - fine_tuned_model + - organization_id + - status + - hyperparams + - training_files + - validation_files + - result_files + + FineTuneEvent: + title: FineTuneEvent + properties: + object: + type: string + created_at: + type: integer + level: + type: string + message: + type: string + required: + - object + - created_at + - level + - message + +x-oaiMeta: + groups: + - id: models + title: Models + description: | + List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. + - id: completions + title: Completions + description: | + Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. + - id: edits + title: Edits + description: | + Given a prompt and an instruction, the model will return an edited version of the prompt. + - id: images + title: Images + description: | + Given a prompt and/or an input image, the model will generate a new image. + + Related guide: [Image generation](/docs/guides/images) + - id: embeddings + title: Embeddings + description: | + Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + + Related guide: [Embeddings](/docs/guides/embeddings) + - id: files + title: Files + description: | + Files are used to upload documents that can be used with features like [Fine-tuning](/docs/api-reference/fine-tunes). + - id: fine-tunes + title: Fine-tunes + description: | + Manage fine-tuning jobs to tailor a model to your specific training data. 
+
+        Related guide: [Fine-tune models](/docs/guides/fine-tuning)
+    - id: moderations
+      title: Moderations
+      description: |
+        Given an input text, outputs if the model classifies it as violating OpenAI's content policy.
+
+        Related guide: [Moderations](/docs/guides/moderation)
+    - id: searches
+      title: Searches
+      warning:
+        title: This endpoint is deprecated and will be removed on December 3rd, 2022
+        message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272952-search-transition-guide).
+      description: |
+        Given a query and a set of documents or labels, the model ranks each document based on its semantic similarity to the provided query.
+
+        Related guide: [Search](/docs/guides/search)
+    - id: classifications
+      title: Classifications
+      warning:
+        title: This endpoint is deprecated and will be removed on December 3rd, 2022
+        message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272941-classifications-transition-guide).
+      description: |
+        Given a query and a set of labeled examples, the model will predict the most likely label for the query. Useful as a drop-in replacement for any ML classification or text-to-label task.
+
+        Related guide: [Classification](/docs/guides/classifications)
+    - id: answers
+      title: Answers
+      warning:
+        title: This endpoint is deprecated and will be removed on December 3rd, 2022
+        message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6233728-answers-transition-guide).
+      description: |
+        Given a question, a set of documents, and some examples, the API generates an answer to the question based on the information in the set of documents. This is useful for question-answering applications on sources of truth, like company documentation or a knowledge base.
+ + Related guide: [Question answering](/docs/guides/answers) + - id: engines + title: Engines + description: These endpoints describe and provide access to the various engines available in the API. + warning: + title: The Engines endpoints are deprecated. + message: Please use their replacement, [Models](/docs/api-reference/models), instead. [Learn more](https://help.openai.com/TODO). diff --git a/fr/.gitbook/assets/opik-1.png b/fr/.gitbook/assets/opik-1.png new file mode 100644 index 00000000..906b04a3 Binary files /dev/null and b/fr/.gitbook/assets/opik-1.png differ diff --git a/fr/.gitbook/assets/opik-2.png b/fr/.gitbook/assets/opik-2.png new file mode 100644 index 00000000..a561e4c7 Binary files /dev/null and b/fr/.gitbook/assets/opik-2.png differ diff --git a/fr/.gitbook/assets/opik-3.png b/fr/.gitbook/assets/opik-3.png new file mode 100644 index 00000000..88ae6422 Binary files /dev/null and b/fr/.gitbook/assets/opik-3.png differ diff --git a/fr/.gitbook/assets/opik-4.png b/fr/.gitbook/assets/opik-4.png new file mode 100644 index 00000000..69d816d5 Binary files /dev/null and b/fr/.gitbook/assets/opik-4.png differ diff --git a/fr/.gitbook/assets/oxylabs_document_loader.png b/fr/.gitbook/assets/oxylabs_document_loader.png new file mode 100644 index 00000000..9c607067 Binary files /dev/null and b/fr/.gitbook/assets/oxylabs_document_loader.png differ diff --git a/fr/.gitbook/assets/phoenix/phoenix-1.png b/fr/.gitbook/assets/phoenix/phoenix-1.png new file mode 100644 index 00000000..8a466a29 Binary files /dev/null and b/fr/.gitbook/assets/phoenix/phoenix-1.png differ diff --git a/fr/.gitbook/assets/phoenix/phoenix-2.png b/fr/.gitbook/assets/phoenix/phoenix-2.png new file mode 100644 index 00000000..12fa4fdd Binary files /dev/null and b/fr/.gitbook/assets/phoenix/phoenix-2.png differ diff --git a/fr/.gitbook/assets/phoenix/phoenix-3.png b/fr/.gitbook/assets/phoenix/phoenix-3.png new file mode 100644 index 00000000..10548706 Binary files /dev/null and 
b/fr/.gitbook/assets/phoenix/phoenix-3.png differ diff --git a/fr/.gitbook/assets/pinecone_1.png b/fr/.gitbook/assets/pinecone_1.png new file mode 100644 index 00000000..efd45d48 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_1.png differ diff --git a/fr/.gitbook/assets/pinecone_2.png b/fr/.gitbook/assets/pinecone_2.png new file mode 100644 index 00000000..1cdfa258 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_2.png differ diff --git a/fr/.gitbook/assets/pinecone_3.png b/fr/.gitbook/assets/pinecone_3.png new file mode 100644 index 00000000..495497d3 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_3.png differ diff --git a/fr/.gitbook/assets/pinecone_4.png b/fr/.gitbook/assets/pinecone_4.png new file mode 100644 index 00000000..6674e759 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_4.png differ diff --git a/fr/.gitbook/assets/pinecone_5.png b/fr/.gitbook/assets/pinecone_5.png new file mode 100644 index 00000000..798beb4e Binary files /dev/null and b/fr/.gitbook/assets/pinecone_5.png differ diff --git a/fr/.gitbook/assets/pinecone_6.png b/fr/.gitbook/assets/pinecone_6.png new file mode 100644 index 00000000..41accc77 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_6.png differ diff --git a/fr/.gitbook/assets/pinecone_7.png b/fr/.gitbook/assets/pinecone_7.png new file mode 100644 index 00000000..f902d4c2 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_7.png differ diff --git a/fr/.gitbook/assets/pinecone_8.png b/fr/.gitbook/assets/pinecone_8.png new file mode 100644 index 00000000..d8bf1bc0 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_8.png differ diff --git a/fr/.gitbook/assets/pinecone_llama_chatflow.png b/fr/.gitbook/assets/pinecone_llama_chatflow.png new file mode 100644 index 00000000..563f6565 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_llama_chatflow.png differ diff --git a/fr/.gitbook/assets/pinecone_llama_upsert.png b/fr/.gitbook/assets/pinecone_llama_upsert.png new file mode 
100644 index 00000000..7b8e30fc Binary files /dev/null and b/fr/.gitbook/assets/pinecone_llama_upsert.png differ diff --git a/fr/.gitbook/assets/pinecone_llamaindex.png b/fr/.gitbook/assets/pinecone_llamaindex.png new file mode 100644 index 00000000..32977e96 Binary files /dev/null and b/fr/.gitbook/assets/pinecone_llamaindex.png differ diff --git a/fr/.gitbook/assets/qdrant/1.png b/fr/.gitbook/assets/qdrant/1.png new file mode 100644 index 00000000..41e3dad8 Binary files /dev/null and b/fr/.gitbook/assets/qdrant/1.png differ diff --git a/fr/.gitbook/assets/qdrant/2.png b/fr/.gitbook/assets/qdrant/2.png new file mode 100644 index 00000000..630b9c27 Binary files /dev/null and b/fr/.gitbook/assets/qdrant/2.png differ diff --git a/fr/.gitbook/assets/qdrant/3.png b/fr/.gitbook/assets/qdrant/3.png new file mode 100644 index 00000000..58dc5cd7 Binary files /dev/null and b/fr/.gitbook/assets/qdrant/3.png differ diff --git a/fr/.gitbook/assets/render/1.png b/fr/.gitbook/assets/render/1.png new file mode 100644 index 00000000..60515e19 Binary files /dev/null and b/fr/.gitbook/assets/render/1.png differ diff --git a/fr/.gitbook/assets/render/10.png b/fr/.gitbook/assets/render/10.png new file mode 100644 index 00000000..30ade407 Binary files /dev/null and b/fr/.gitbook/assets/render/10.png differ diff --git a/fr/.gitbook/assets/render/11.png b/fr/.gitbook/assets/render/11.png new file mode 100644 index 00000000..39248e0f Binary files /dev/null and b/fr/.gitbook/assets/render/11.png differ diff --git a/fr/.gitbook/assets/render/2.png b/fr/.gitbook/assets/render/2.png new file mode 100644 index 00000000..9105e091 Binary files /dev/null and b/fr/.gitbook/assets/render/2.png differ diff --git a/fr/.gitbook/assets/render/3.png b/fr/.gitbook/assets/render/3.png new file mode 100644 index 00000000..b74c7452 Binary files /dev/null and b/fr/.gitbook/assets/render/3.png differ diff --git a/fr/.gitbook/assets/render/4.png b/fr/.gitbook/assets/render/4.png new file mode 100644 index 
00000000..bb7d83e9 Binary files /dev/null and b/fr/.gitbook/assets/render/4.png differ diff --git a/fr/.gitbook/assets/render/5.png b/fr/.gitbook/assets/render/5.png new file mode 100644 index 00000000..8956f03b Binary files /dev/null and b/fr/.gitbook/assets/render/5.png differ diff --git a/fr/.gitbook/assets/render/6.png b/fr/.gitbook/assets/render/6.png new file mode 100644 index 00000000..07fe0e82 Binary files /dev/null and b/fr/.gitbook/assets/render/6.png differ diff --git a/fr/.gitbook/assets/render/7.png b/fr/.gitbook/assets/render/7.png new file mode 100644 index 00000000..efb7b057 Binary files /dev/null and b/fr/.gitbook/assets/render/7.png differ diff --git a/fr/.gitbook/assets/render/8.png b/fr/.gitbook/assets/render/8.png new file mode 100644 index 00000000..c3cc2f55 Binary files /dev/null and b/fr/.gitbook/assets/render/8.png differ diff --git a/fr/.gitbook/assets/render/9.png b/fr/.gitbook/assets/render/9.png new file mode 100644 index 00000000..e0a86f22 Binary files /dev/null and b/fr/.gitbook/assets/render/9.png differ diff --git a/fr/.gitbook/assets/sa-agent.png b/fr/.gitbook/assets/sa-agent.png new file mode 100644 index 00000000..7de9c9ad Binary files /dev/null and b/fr/.gitbook/assets/sa-agent.png differ diff --git a/fr/.gitbook/assets/sa-llm.png b/fr/.gitbook/assets/sa-llm.png new file mode 100644 index 00000000..194e1bf3 Binary files /dev/null and b/fr/.gitbook/assets/sa-llm.png differ diff --git a/fr/.gitbook/assets/sa-loop (1).png b/fr/.gitbook/assets/sa-loop (1).png new file mode 100644 index 00000000..57f8e164 Binary files /dev/null and b/fr/.gitbook/assets/sa-loop (1).png differ diff --git a/fr/.gitbook/assets/sa-loop.png b/fr/.gitbook/assets/sa-loop.png new file mode 100644 index 00000000..57f8e164 Binary files /dev/null and b/fr/.gitbook/assets/sa-loop.png differ diff --git a/fr/.gitbook/assets/savereuse.png b/fr/.gitbook/assets/savereuse.png new file mode 100644 index 00000000..e44dcdab Binary files /dev/null and 
b/fr/.gitbook/assets/savereuse.png differ diff --git a/fr/.gitbook/assets/screely-1687030897806.png b/fr/.gitbook/assets/screely-1687030897806.png new file mode 100644 index 00000000..2b5d8b7d Binary files /dev/null and b/fr/.gitbook/assets/screely-1687030897806.png differ diff --git a/fr/.gitbook/assets/screely-1687030924019.png b/fr/.gitbook/assets/screely-1687030924019.png new file mode 100644 index 00000000..1e6f1a89 Binary files /dev/null and b/fr/.gitbook/assets/screely-1687030924019.png differ diff --git a/fr/.gitbook/assets/screely-1689640211454.png b/fr/.gitbook/assets/screely-1689640211454.png new file mode 100644 index 00000000..e5d97016 Binary files /dev/null and b/fr/.gitbook/assets/screely-1689640211454.png differ diff --git a/fr/.gitbook/assets/screely-1691756705932.png b/fr/.gitbook/assets/screely-1691756705932.png new file mode 100644 index 00000000..8f78243b Binary files /dev/null and b/fr/.gitbook/assets/screely-1691756705932.png differ diff --git a/fr/.gitbook/assets/screely-1691758397783.png b/fr/.gitbook/assets/screely-1691758397783.png new file mode 100644 index 00000000..f86ad79d Binary files /dev/null and b/fr/.gitbook/assets/screely-1691758397783.png differ diff --git a/fr/.gitbook/assets/screely-1691758990676.png b/fr/.gitbook/assets/screely-1691758990676.png new file mode 100644 index 00000000..b1a367fd Binary files /dev/null and b/fr/.gitbook/assets/screely-1691758990676.png differ diff --git a/fr/.gitbook/assets/screely-1698767992182.png b/fr/.gitbook/assets/screely-1698767992182.png new file mode 100644 index 00000000..7a0110c2 Binary files /dev/null and b/fr/.gitbook/assets/screely-1698767992182.png differ diff --git a/fr/.gitbook/assets/screely-1699893014634.png b/fr/.gitbook/assets/screely-1699893014634.png new file mode 100644 index 00000000..7fdd0c41 Binary files /dev/null and b/fr/.gitbook/assets/screely-1699893014634.png differ diff --git a/fr/.gitbook/assets/screely-1699894602544.png 
b/fr/.gitbook/assets/screely-1699894602544.png new file mode 100644 index 00000000..8f70ff8a Binary files /dev/null and b/fr/.gitbook/assets/screely-1699894602544.png differ diff --git a/fr/.gitbook/assets/screely-1699896158130.png b/fr/.gitbook/assets/screely-1699896158130.png new file mode 100644 index 00000000..fa1ccff7 Binary files /dev/null and b/fr/.gitbook/assets/screely-1699896158130.png differ diff --git a/fr/.gitbook/assets/screely-1700662138252.png b/fr/.gitbook/assets/screely-1700662138252.png new file mode 100644 index 00000000..c228b3fb Binary files /dev/null and b/fr/.gitbook/assets/screely-1700662138252.png differ diff --git a/fr/.gitbook/assets/screely-1749593961545.png b/fr/.gitbook/assets/screely-1749593961545.png new file mode 100644 index 00000000..38423547 Binary files /dev/null and b/fr/.gitbook/assets/screely-1749593961545.png differ diff --git a/fr/.gitbook/assets/screely-1749594035877.png b/fr/.gitbook/assets/screely-1749594035877.png new file mode 100644 index 00000000..bf9cd3a2 Binary files /dev/null and b/fr/.gitbook/assets/screely-1749594035877.png differ diff --git a/fr/.gitbook/assets/screely-1749594614881.png b/fr/.gitbook/assets/screely-1749594614881.png new file mode 100644 index 00000000..feafeb4f Binary files /dev/null and b/fr/.gitbook/assets/screely-1749594614881.png differ diff --git a/fr/.gitbook/assets/screely-1749594631028.png b/fr/.gitbook/assets/screely-1749594631028.png new file mode 100644 index 00000000..6f44fa0e Binary files /dev/null and b/fr/.gitbook/assets/screely-1749594631028.png differ diff --git a/fr/.gitbook/assets/sealos/deployment/1.jpg b/fr/.gitbook/assets/sealos/deployment/1.jpg new file mode 100644 index 00000000..1452a2b2 Binary files /dev/null and b/fr/.gitbook/assets/sealos/deployment/1.jpg differ diff --git a/fr/.gitbook/assets/sealos/deployment/2.png b/fr/.gitbook/assets/sealos/deployment/2.png new file mode 100644 index 00000000..aa69c6bc Binary files /dev/null and 
b/fr/.gitbook/assets/sealos/deployment/2.png differ diff --git a/fr/.gitbook/assets/sealos/deployment/3.png b/fr/.gitbook/assets/sealos/deployment/3.png new file mode 100644 index 00000000..d8f1242f Binary files /dev/null and b/fr/.gitbook/assets/sealos/deployment/3.png differ diff --git a/fr/.gitbook/assets/sealos/deployment/4.png b/fr/.gitbook/assets/sealos/deployment/4.png new file mode 100644 index 00000000..198cf347 Binary files /dev/null and b/fr/.gitbook/assets/sealos/deployment/4.png differ diff --git a/fr/.gitbook/assets/seq-00.png b/fr/.gitbook/assets/seq-00.png new file mode 100644 index 00000000..52c437a8 Binary files /dev/null and b/fr/.gitbook/assets/seq-00.png differ diff --git a/fr/.gitbook/assets/seq-01.png b/fr/.gitbook/assets/seq-01.png new file mode 100644 index 00000000..d9e8f3a6 Binary files /dev/null and b/fr/.gitbook/assets/seq-01.png differ diff --git a/fr/.gitbook/assets/seq-02.png b/fr/.gitbook/assets/seq-02.png new file mode 100644 index 00000000..7d99d317 Binary files /dev/null and b/fr/.gitbook/assets/seq-02.png differ diff --git a/fr/.gitbook/assets/seq-03.png b/fr/.gitbook/assets/seq-03.png new file mode 100644 index 00000000..bcd9409b Binary files /dev/null and b/fr/.gitbook/assets/seq-03.png differ diff --git a/fr/.gitbook/assets/seq-04.png b/fr/.gitbook/assets/seq-04.png new file mode 100644 index 00000000..97868ec4 Binary files /dev/null and b/fr/.gitbook/assets/seq-04.png differ diff --git a/fr/.gitbook/assets/seq-05.png b/fr/.gitbook/assets/seq-05.png new file mode 100644 index 00000000..cf05b37d Binary files /dev/null and b/fr/.gitbook/assets/seq-05.png differ diff --git a/fr/.gitbook/assets/seq-06.png b/fr/.gitbook/assets/seq-06.png new file mode 100644 index 00000000..9aa40ae9 Binary files /dev/null and b/fr/.gitbook/assets/seq-06.png differ diff --git a/fr/.gitbook/assets/seq-07.png b/fr/.gitbook/assets/seq-07.png new file mode 100644 index 00000000..fffa2e7c Binary files /dev/null and b/fr/.gitbook/assets/seq-07.png differ 
diff --git a/fr/.gitbook/assets/seq-08.png b/fr/.gitbook/assets/seq-08.png new file mode 100644 index 00000000..12f037bd Binary files /dev/null and b/fr/.gitbook/assets/seq-08.png differ diff --git a/fr/.gitbook/assets/seq-09.png b/fr/.gitbook/assets/seq-09.png new file mode 100644 index 00000000..080d0519 Binary files /dev/null and b/fr/.gitbook/assets/seq-09.png differ diff --git a/fr/.gitbook/assets/seq-10.png b/fr/.gitbook/assets/seq-10.png new file mode 100644 index 00000000..7db39188 Binary files /dev/null and b/fr/.gitbook/assets/seq-10.png differ diff --git a/fr/.gitbook/assets/seq-11.png b/fr/.gitbook/assets/seq-11.png new file mode 100644 index 00000000..f052461e Binary files /dev/null and b/fr/.gitbook/assets/seq-11.png differ diff --git a/fr/.gitbook/assets/seq-12.png b/fr/.gitbook/assets/seq-12.png new file mode 100644 index 00000000..494ea054 Binary files /dev/null and b/fr/.gitbook/assets/seq-12.png differ diff --git a/fr/.gitbook/assets/seq-14 (1).png b/fr/.gitbook/assets/seq-14 (1).png new file mode 100644 index 00000000..fb09eba7 Binary files /dev/null and b/fr/.gitbook/assets/seq-14 (1).png differ diff --git a/fr/.gitbook/assets/seq-14.png b/fr/.gitbook/assets/seq-14.png new file mode 100644 index 00000000..fb09eba7 Binary files /dev/null and b/fr/.gitbook/assets/seq-14.png differ diff --git a/fr/.gitbook/assets/seq-16 (1).png b/fr/.gitbook/assets/seq-16 (1).png new file mode 100644 index 00000000..2e317ee4 Binary files /dev/null and b/fr/.gitbook/assets/seq-16 (1).png differ diff --git a/fr/.gitbook/assets/seq-20.png b/fr/.gitbook/assets/seq-20.png new file mode 100644 index 00000000..1e00abb8 Binary files /dev/null and b/fr/.gitbook/assets/seq-20.png differ diff --git a/fr/.gitbook/assets/seq-21.svg b/fr/.gitbook/assets/seq-21.svg new file mode 100644 index 00000000..34db743f --- /dev/null +++ b/fr/.gitbook/assets/seq-21.svg @@ -0,0 +1,13 @@ + + + + + + + + Agent 1Agent 2Conditionalagent_toolLoopStartEnd \ No newline at end of file diff --git 
a/fr/.gitbook/assets/seq-end-node.png b/fr/.gitbook/assets/seq-end-node.png new file mode 100644 index 00000000..76b1d08e Binary files /dev/null and b/fr/.gitbook/assets/seq-end-node.png differ diff --git a/fr/.gitbook/assets/simplestore.png b/fr/.gitbook/assets/simplestore.png new file mode 100644 index 00000000..24f9b252 Binary files /dev/null and b/fr/.gitbook/assets/simplestore.png differ diff --git a/fr/.gitbook/assets/spider.png b/fr/.gitbook/assets/spider.png new file mode 100644 index 00000000..2cc0bba6 Binary files /dev/null and b/fr/.gitbook/assets/spider.png differ diff --git a/fr/.gitbook/assets/spider_example_usage.png b/fr/.gitbook/assets/spider_example_usage.png new file mode 100644 index 00000000..6e53d3c0 Binary files /dev/null and b/fr/.gitbook/assets/spider_example_usage.png differ diff --git a/fr/.gitbook/assets/streaming-1.webp b/fr/.gitbook/assets/streaming-1.webp new file mode 100644 index 00000000..df178d8e Binary files /dev/null and b/fr/.gitbook/assets/streaming-1.webp differ diff --git a/fr/.gitbook/assets/subagents.png b/fr/.gitbook/assets/subagents.png new file mode 100644 index 00000000..58c96341 Binary files /dev/null and b/fr/.gitbook/assets/subagents.png differ diff --git a/fr/.gitbook/assets/subagentstool.png b/fr/.gitbook/assets/subagentstool.png new file mode 100644 index 00000000..d21112dc Binary files /dev/null and b/fr/.gitbook/assets/subagentstool.png differ diff --git a/fr/.gitbook/assets/swagger (1) (1) (1).yml b/fr/.gitbook/assets/swagger (1) (1) (1).yml new file mode 100644 index 00000000..9e347ad5 --- /dev/null +++ b/fr/.gitbook/assets/swagger (1) (1) (1).yml @@ -0,0 +1,2467 @@ +tags: + - name: apikey + - name: assistants + - name: chatmessage + - name: chatflows + - name: credentials + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /apikey: + post: + tags: + - apikey + security: + - 
bearerAuth: [] + operationId: createApiKey + summary: Add new api key + description: Add new api key + requestBody: + content: + application/json: + schema: + type: object + properties: + keyName: + type: string + example: 'someKeyName' + required: true + responses: + '200': + description: Api key created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + application/xml: + schema: + $ref: '#/components/schemas/ApiKey' + '400': + description: Invalid keyName provided + '404': + description: Api Key not found + '422': + description: Validation exception + get: + tags: + - apikey + security: + - bearerAuth: [] + summary: List all API keys + description: List all API keys + operationId: listApiKey + responses: + '200': + description: A list of API keys + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ApiKey' + application/xml: + schema: + type: array + items: + $ref: '#/components/schemas/ApiKey' + '500': + description: Internal error + /apikey/{id}: + put: + tags: + - apikey + security: + - bearerAuth: [] + summary: Update API key name + description: Update API key name + operationId: updateApiKey + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Api Key ID + requestBody: + content: + application/json: + schema: + type: object + properties: + keyName: + type: string + example: 'someKeyName' + responses: + '200': + description: Api key updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + application/xml: + schema: + $ref: '#/components/schemas/ApiKey' + '400': + description: The specified ID is invalid + '404': + description: Api Key with the specified ID was not found + '500': + description: Internal error + delete: + tags: + - apikey + security: + - bearerAuth: [] + summary: Delete API key + description: Delete API key + operationId: deleteApiKey + parameters: + - in: path + name: id + 
required: true + schema: + type: string + description: Api Key ID + responses: + '200': + description: Api key deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Api Key with the specified ID was not found + '500': + description: Internal error + + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + 
description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - 
chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + 
'404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /credentials: + post: + tags: + - credentials + security: + - bearerAuth: [] + operationId: createCredential + summary: Add new credential + description: Add new credential + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + required: true + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + '400': + description: Invalid request body + '422': + description: Validation exception + get: + tags: + - credentials + security: + - bearerAuth: [] + summary: List all credentials + description: List all credentials + operationId: listCredentials + parameters: + - in: query + name: credentialName + required: false + schema: + type: string + description: Filter credentials by name + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Credential' + '500': + description: Internal error + + /credentials/{id}: + get: + tags: + - credentials + security: + - bearerAuth: [] + summary: Get a credential by ID + description: Retrieve a specific credential by ID + operationId: getCredentialById + parameters: + - in: path + name: id + required: true + schema: + 
type: string + description: Credential ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + '400': + description: The specified ID is invalid + '404': + description: Credential with the specified ID was not found + '500': + description: Internal error + put: + tags: + - credentials + security: + - bearerAuth: [] + summary: Update a credential by ID + description: Update a specific credential by ID + operationId: updateCredential + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Credential ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + required: true + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + '400': + description: The specified ID is invalid or request body is invalid + '404': + description: Credential with the specified ID was not found + '500': + description: Internal error + delete: + tags: + - credentials + security: + - bearerAuth: [] + summary: Delete a credential by ID + description: Delete a specific credential by ID + operationId: deleteCredential + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Credential ID + responses: + '200': + description: Successful operation + '400': + description: The specified ID is invalid + '404': + description: Credential with the specified ID was not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully 
created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + 
security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/loader/preview: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Preview document chunks + description: Preview document chunks from loader + operationId: previewChunking + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + required: true + responses: + '200': + description: Successfully preview chunks + content: + application/json: + schema: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/Document' + totalChunks: + type: integer + example: 10 + previewChunkCount: + type: integer + example: 5 + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/process: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Process loading & chunking operation + description: Process loading & chunking operation of document from loader + operationId: processChunking + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - id + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + id: + type: string + description: Document loader ID. 
If your URL is /document-stores/{storeId}/{id}, then id is the last part of the URL + example: 'c427e569-b81a-469a-b14c-fa73dd5bae49' + required: true + responses: + '200': + description: Successfully process chunking operation + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/save: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Save upsert configuration of document store + description: Save upsert configuration of document store + operationId: saveVectorStoreConfig + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + embeddingName: + type: string + description: Name of the embedding + example: 'openAIEmbeddings' + embeddingConfig: + type: object + description: Configuration of the embedding + example: { 'model': 'text-embedding-ada-002', 'credential': '1eba5808-c55b-4817-a285-b0c92846a7ad' } + vectorStoreName: + type: string + description: Name of the vector store + example: 'faiss' + vectorStoreConfig: + type: object + description: Configuration of the embedding + example: { 'basePath': './faiss' } + recordManagerName: + type: string + description: Name of the record manager + example: 'SQLiteRecordManager' + recordManagerConfig: + type: object + description: Configuration of the embedding + example: { 'databaseFilePath': './recordManager.db' } + required: true + responses: + '200': + description: Successfully save upsert configuration of document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/insert: + post: + tags: + - 
document-store + security: + - bearerAuth: [] + summary: Upsert chunks from document store + description: Upsert chunks from document store using the saved configuration + operationId: insertIntoVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + required: true + responses: + '200': + description: Successfully save upsert configuration of document store + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' 
+ required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + description: The type of file upload (e.g., 'file', 'audio', 'url') + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: true + example: 
'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: string + 
description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + storeId: + 
type: string + format: uuid + description: Document store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + type: 
number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (1) (1).yml b/fr/.gitbook/assets/swagger (1) (1).yml new file mode 100644 index 00000000..379b15da --- /dev/null +++ b/fr/.gitbook/assets/swagger (1) (1).yml @@ -0,0 +1,2467 @@ +tags: + - name: apikey + - name: assistants + - name: chatmessage + - name: chatflows + - name: credentials + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /apikey: + post: + tags: + - apikey + security: + - bearerAuth: [] + operationId: createApiKey + 
summary: Add new api key + description: Add new api key + requestBody: + content: + application/json: + schema: + type: object + properties: + keyName: + type: string + example: 'someKeyName' + required: true + responses: + '200': + description: Api key created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + application/xml: + schema: + $ref: '#/components/schemas/ApiKey' + '400': + description: Invalid keyName provided + '404': + description: Api Key not found + '422': + description: Validation exception + get: + tags: + - apikey + security: + - bearerAuth: [] + summary: List all API keys + description: List all API keys + operationId: listApiKey + responses: + '200': + description: A list of API keys + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ApiKey' + application/xml: + schema: + type: array + items: + $ref: '#/components/schemas/ApiKey' + '500': + description: Internal error + /apikey/{id}: + put: + tags: + - apikey + security: + - bearerAuth: [] + summary: Update API key name + description: Update API key name + operationId: updateApiKey + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Api Key ID + requestBody: + content: + application/json: + schema: + type: object + properties: + keyName: + type: string + example: 'someKeyName' + responses: + '200': + description: Api key updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKey' + application/xml: + schema: + $ref: '#/components/schemas/ApiKey' + '400': + description: The specified ID is invalid + '404': + description: Api Key with the specified ID was not found + '500': + description: Internal error + delete: + tags: + - apikey + security: + - bearerAuth: [] + summary: Delete API key + description: Delete API key + operationId: deleteApiKey + parameters: + - in: path + name: id + required: true + schema: + type: string + 
description: Api Key ID + responses: + '200': + description: Api key deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Api Key with the specified ID was not found + '500': + description: Internal error + + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + 
description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - 
chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + 
'404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /credentials: + post: + tags: + - credentials + security: + - bearerAuth: [] + operationId: createCredential + summary: Add new credential + description: Add new credential + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + required: true + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + '400': + description: Invalid request body + '422': + description: Validation exception + get: + tags: + - credentials + security: + - bearerAuth: [] + summary: List all credentials + description: List all credentials + operationId: listCredentials + parameters: + - in: query + name: credentialName + required: false + schema: + type: string + description: Filter credentials by name + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Credential' + '500': + description: Internal error + + /credentials/{id}: + get: + tags: + - credentials + security: + - bearerAuth: [] + summary: Get a credential by ID + description: Retrieve a specific credential by ID + operationId: getCredentialById + parameters: + - in: path + name: id + required: true + schema: + 
type: string + description: Credential ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + '400': + description: The specified ID is invalid + '404': + description: Credential with the specified ID was not found + '500': + description: Internal error + put: + tags: + - credentials + security: + - bearerAuth: [] + summary: Update a credential by ID + description: Update a specific credential by ID + operationId: updateCredential + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Credential ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + required: true + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Credential' + '400': + description: The specified ID is invalid or request body is invalid + '404': + description: Credential with the specified ID was not found + '500': + description: Internal error + delete: + tags: + - credentials + security: + - bearerAuth: [] + summary: Delete a credential by ID + description: Delete a specific credential by ID + operationId: deleteCredential + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Credential ID + responses: + '200': + description: Successful operation + '400': + description: The specified ID is invalid + '404': + description: Credential with the specified ID was not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully 
created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + 
security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/loader/preview: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Preview document chunks + description: Preview document chunks from loader + operationId: previewChunking + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + required: true + responses: + '200': + description: Successfully preview chunks + content: + application/json: + schema: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/Document' + totalChunks: + type: integer + example: 10 + previewChunkCount: + type: integer + example: 5 + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/process: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Process loading & chunking operation + description: Process loading & chunking operation of document from loader + operationId: processChunking + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - id + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + id: + type: string + description: Document loader ID. 
If your URL is /document-stores/{storeId}/{id}, then id is the last part of the URL + example: 'c427e569-b81a-469a-b14c-fa73dd5bae49' + required: true + responses: + '200': + description: Successfully process chunking operation + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/save: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Save upsert configuration of document store + description: Save upsert configuration of document store + operationId: saveVectorStoreConfig + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + embeddingName: + type: string + description: Name of the embedding + example: 'openAIEmbeddings' + embeddingConfig: + type: object + description: Configuration of the embedding + example: { 'model': 'text-embedding-ada-002', 'credential': '1eba5808-c55b-4817-a285-b0c92846a7ad' } + vectorStoreName: + type: string + description: Name of the vector store + example: 'faiss' + vectorStoreConfig: + type: object + description: Configuration of the embedding + example: { 'basePath': './faiss' } + recordManagerName: + type: string + description: Name of the record manager + example: 'SQLiteRecordManager' + recordManagerConfig: + type: object + description: Configuration of the embedding + example: { 'databaseFilePath': './recordManager.db' } + required: true + responses: + '200': + description: Successfully save upsert configuration of document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/insert: + post: + tags: + - 
document-store + security: + - bearerAuth: [] + summary: Upsert chunks from document store + description: Upsert chunks from document store using the saved configuration + operationId: insertIntoVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + required: true + responses: + '200': + description: Successfully save upsert configuration of document store + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' 
+ required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + description: The type of file upload (e.g., 'file', 'audio', 'url') + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: true + example: 
'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: string + 
description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + storeId: + 
type: string + format: uuid + description: Document store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + type: 
number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (1).yml b/fr/.gitbook/assets/swagger (1).yml new file mode 100644 index 00000000..da52b2f0 --- /dev/null +++ b/fr/.gitbook/assets/swagger (1).yml @@ -0,0 +1,2273 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List 
all chat messages + description: Retrieve all chat messages for a specific chatflow. + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + 
description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + /attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: 
string + format: binary + description: Files to be uploaded + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows 
+ security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + 
requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: 
'#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/loader/preview: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Preview document chunks + description: Preview document chunks from loader + operationId: previewChunking + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + required: true + responses: + '200': + description: Successfully preview chunks + content: + application/json: + schema: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/Document' + totalChunks: + type: integer + example: 10 + previewChunkCount: + type: integer + example: 5 + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/process: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Process loading & chunking operation + description: Process loading & chunking operation of document from loader + operationId: processChunking + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - id + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + id: + type: string + description: Document loader ID. 
If your URL is /document-stores/{storeId}/{id}, then id is the last part of the URL + example: 'c427e569-b81a-469a-b14c-fa73dd5bae49' + required: true + responses: + '200': + description: Successfully process chunking operation + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/save: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Save upsert configuration of document store + description: Save upsert configuration of document store + operationId: saveVectorStoreConfig + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + embeddingName: + type: string + description: Name of the embedding + example: 'openAIEmbeddings' + embeddingConfig: + type: object + description: Configuration of the embedding + example: { 'model': 'text-embedding-ada-002', 'credential': '1eba5808-c55b-4817-a285-b0c92846a7ad' } + vectorStoreName: + type: string + description: Name of the vector store + example: 'faiss' + vectorStoreConfig: + type: object + description: Configuration of the embedding + example: { 'basePath': './faiss' } + recordManagerName: + type: string + description: Name of the record manager + example: 'SQLiteRecordManager' + recordManagerConfig: + type: object + description: Configuration of the embedding + example: { 'databaseFilePath': './recordManager.db' } + required: true + responses: + '200': + description: Successfully save upsert configuration of document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/insert: + post: + tags: + - 
document-store + security: + - bearerAuth: [] + summary: Upsert chunks from document store + description: Upsert chunks from document store using the saved configuration + operationId: insertIntoVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + required: true + responses: + '200': + description: Successfully save upsert configuration of document store + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' 
+ required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: 
true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: 
string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + 
storeId: + type: string + format: uuid + description: Document store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + 
type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (2) (1).yml b/fr/.gitbook/assets/swagger (2) (1).yml new file mode 100644 index 00000000..ad9b5e4b --- /dev/null +++ b/fr/.gitbook/assets/swagger (2) (1).yml @@ -0,0 +1,2281 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - 
name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + 
description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + /attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: 
string + format: binary + description: Files to be uploaded + base64: + type: boolean + default: false + description: Return contents of the files in base64 format + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + 
'404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + 
description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + 
description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/upsert/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Upsert new document to document store + description: Upsert new document to document store + operationId: upsertDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + loader: + type: string + nullable: true + example: '{"name":"plainText","config":{"text":"why the sky is blue"}}' + description: Loader configurations + splitter: + type: string + nullable: true + example: '{"name":"recursiveCharacterTextSplitter","config":{"chunkSize":2000}}' + description: Splitter configurations + embedding: + type: string + nullable: true + example: '{"name":"openAIEmbeddings","config":{"modelName":"text-embedding-ada-002"}}' + description: Embedding configurations + vectorStore: + type: string + nullable: true + example: '{"name":"faiss"}' + description: Vector Store configurations + recordManager: + type: 
string + nullable: true + example: '{"name":"postgresRecordManager"}' + description: Record Manager configurations + required: + - files + required: true + responses: + '200': + description: Successfully execute upsert operation + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/refresh/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Re-process and upsert all documents in document store + description: Re-process and upsert all existing documents in document store + operationId: refreshDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForRefresh' + required: true + responses: + '200': + description: Successfully execute refresh operation + content: + application/json: + type: array + items: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + storeId: + type: string + description: Document store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' 
+ required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: 
true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: 
string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + 
storeId: + type: string + format: uuid + description: Document store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + 
type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + DocumentStoreLoaderForUpsert: + type: object + properties: + docId: + type: string + format: uuid + description: Document ID within the store. If provided, existing configuration from the document will be used for the new document + loader: + type: object + properties: + name: + type: string + example: plainText + description: Name of the loader (camelCase) + config: + type: object + description: Configuration for the loader + splitter: + type: object + properties: + name: + type: string + example: recursiveCharacterTextSplitter + description: Name of the text splitter (camelCase) + config: + type: object + description: Configuration for the text splitter + embedding: + type: object + properties: + name: + type: string + example: openAIEmbeddings + description: Name of the embedding generator (camelCase) + config: + type: object + description: Configuration for the embedding generator + vectorStore: + type: object + properties: + name: + type: string + example: faiss + description: Name of the vector store (camelCase) + config: + type: object + description: Configuration for the vector store + recordManager: + type: object + properties: + name: + type: string + example: postgresRecordManager + description: Name of the record manager (camelCase) + config: + type: object + description: Configuration for the record manager + + 
DocumentStoreLoaderForRefresh: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (2).yml b/fr/.gitbook/assets/swagger (2).yml new file mode 100644 index 00000000..5a92d4bf --- /dev/null +++ b/fr/.gitbook/assets/swagger (2).yml @@ -0,0 +1,2438 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + 
description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + /attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: 
string + format: binary + description: Files to be uploaded + base64: + type: boolean + default: false + description: Return contents of the files in base64 format + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + 
'404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + 
description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + 
description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/upsert/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Upsert new document to document store + description: Upsert new document to document store + operationId: upsertDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + loader: + type: string + nullable: true + example: '{"name":"plainText","config":{"text":"why the sky is blue"}}' + description: Loader configurations + splitter: + type: string + nullable: true + example: '{"name":"recursiveCharacterTextSplitter","config":{"chunkSize":2000}}' + description: Splitter configurations + embedding: + type: string + nullable: true + example: '{"name":"openAIEmbeddings","config":{"modelName":"text-embedding-ada-002"}}' + description: Embedding configurations + vectorStore: + type: string + nullable: true + example: '{"name":"faiss"}' + description: Vector Store configurations + recordManager: + type: 
string + nullable: true + example: '{"name":"postgresRecordManager"}' + description: Record Manager configurations + required: + - files + required: true + responses: + '200': + description: Successfully execute upsert operation + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/refresh/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Re-process and upsert all documents in document store + description: Re-process and upsert all existing documents in document store + operationId: refreshDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForRefresh' + required: true + responses: + '200': + description: Successfully execute refresh operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + storeId: + type: string + description: Document Store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' 
+ required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/{storeId}/{loaderId}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete specific document loader and associated chunks from document store + description: Delete specific document loader and associated chunks from document store. This does not delete data from vector store. + operationId: deleteLoaderFromDocumentStore + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + responses: + '200': + description: Successfully deleted loader from document store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{pageNo}: + get: + tags: + - document-store + security: + - bearerAuth: [] + 
summary: Get chunks from a specific document loader + description: Get chunks from a specific document loader within a document store + operationId: getDocumentStoreFileChunks + parameters: + - in: path + name: storeId + required: true + schema: + type: string + format: uuid + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + format: uuid + description: Document loader ID + - in: path + name: pageNo + required: true + schema: + type: string + description: Pagination number + responses: + '200': + description: Successfully retrieved chunks from document loader + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{chunkId}: + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific chunk + description: Updates a specific chunk from a document loader + operationId: editDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Document' + required: true + responses: + '200': + description: Successfully updated chunk + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server error + + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific chunk from a document loader + description: Delete a specific chunk from a document loader + 
operationId: deleteDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + responses: + '200': + description: Successfully deleted chunk + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + 
description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation 
+ content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + 
memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: 
[] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + 
application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + 
application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: 
'#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' 
+ sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' 
+ metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a 
helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: 
true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: 
string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + 
storeId: + type: string + format: uuid + description: Document Store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + 
type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + DocumentStoreLoaderForUpsert: + type: object + properties: + docId: + type: string + format: uuid + description: Document ID within the store. If provided, existing configuration from the document will be used for the new document + loader: + type: object + properties: + name: + type: string + example: plainText + description: Name of the loader (camelCase) + config: + type: object + description: Configuration for the loader + splitter: + type: object + properties: + name: + type: string + example: recursiveCharacterTextSplitter + description: Name of the text splitter (camelCase) + config: + type: object + description: Configuration for the text splitter + embedding: + type: object + properties: + name: + type: string + example: openAIEmbeddings + description: Name of the embedding generator (camelCase) + config: + type: object + description: Configuration for the embedding generator + vectorStore: + type: object + properties: + name: + type: string + example: faiss + description: Name of the vector store (camelCase) + config: + type: object + description: Configuration for the vector store + recordManager: + type: object + properties: + name: + type: string + example: postgresRecordManager + description: Name of the record manager (camelCase) + config: + type: object + description: Configuration for the record manager + + 
DocumentStoreLoaderForRefresh: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (3).yml b/fr/.gitbook/assets/swagger (3).yml new file mode 100644 index 00000000..0ea487b4 --- /dev/null +++ b/fr/.gitbook/assets/swagger (3).yml @@ -0,0 +1,2465 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type. Only applicable if feedback is true + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + description: Filter by end date + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: 
'#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + /attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + 
description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + base64: + type: boolean + default: false + description: Return contents of the files in base64 format + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get 
chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + 
'200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a 
specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/upsert/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Upsert document to document store + description: Upsert document to document store + operationId: upsertDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + loader: + type: string + nullable: true + example: '{"name":"plainText","config":{"text":"why the sky is blue"}}' + description: Loader configurations + splitter: + type: string + nullable: true + example: 
'{"name":"recursiveCharacterTextSplitter","config":{"chunkSize":2000}}' + description: Splitter configurations + embedding: + type: string + nullable: true + example: '{"name":"openAIEmbeddings","config":{"modelName":"text-embedding-ada-002"}}' + description: Embedding configurations + vectorStore: + type: string + nullable: true + example: '{"name":"faiss"}' + description: Vector Store configurations + recordManager: + type: string + nullable: true + example: '{"name":"postgresRecordManager"}' + description: Record Manager configurations + required: + - files + required: true + responses: + '200': + description: Successfully execute upsert operation + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/refresh/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Re-process and upsert all documents in document store + description: Re-process and upsert all existing documents in document store + operationId: refreshDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForRefresh' + required: true + responses: + '200': + description: Successfully execute refresh operation + content: + application/json: + type: array + items: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + 
storeId: + type: string + description: Document Store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' + required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/{storeId}/{loaderId}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete specific document loader and associated chunks from document store + description: Delete specific document loader and associated chunks from document store. This does not delete data from vector store. + operationId: deleteLoaderFromDocumentStore + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + responses: + '200': + description: Successfully deleted loader from document store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + 
description: Document Store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{pageNo}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get chunks from a specific document loader + description: Get chunks from a specific document loader within a document store + operationId: getDocumentStoreFileChunks + parameters: + - in: path + name: storeId + required: true + schema: + type: string + format: uuid + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + format: uuid + description: Document loader ID + - in: path + name: pageNo + required: true + schema: + type: string + description: Pagination number + responses: + '200': + description: Successfully retrieved chunks from document loader + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{chunkId}: + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific chunk + description: Updates a specific chunk from a document loader + operationId: editDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Document' + required: true + responses: + '200': + description: Successfully updated chunk + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server 
error + + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific chunk from a document loader + description: Delete a specific chunk from a document loader + operationId: deleteDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + responses: + '200': + description: Successfully deleted chunk + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: 
true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: 
string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + 
storeId: + type: string + format: uuid + description: Document Store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + 
type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + DocumentStoreLoaderForUpsert: + type: object + properties: + docId: + type: string + format: uuid + description: Document ID within the store. If provided, existing configuration from the document will be used for the new document + metadata: + type: object + description: Metadata associated with the document + example: { 'foo': 'bar' } + replaceExisting: + type: boolean + description: Whether to replace existing document loader with the new upserted chunks. 
However this does not delete the existing embeddings in the vector store + loader: + type: object + properties: + name: + type: string + example: plainText + description: Name of the loader (camelCase) + config: + type: object + description: Configuration for the loader + splitter: + type: object + properties: + name: + type: string + example: recursiveCharacterTextSplitter + description: Name of the text splitter (camelCase) + config: + type: object + description: Configuration for the text splitter + embedding: + type: object + properties: + name: + type: string + example: openAIEmbeddings + description: Name of the embedding generator (camelCase) + config: + type: object + description: Configuration for the embedding generator + vectorStore: + type: object + properties: + name: + type: string + example: faiss + description: Name of the vector store (camelCase) + config: + type: object + description: Configuration for the vector store + recordManager: + type: object + properties: + name: + type: string + example: postgresRecordManager + description: Name of the record manager (camelCase) + config: + type: object + description: Configuration for the record manager + + DocumentStoreLoaderForRefresh: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + 
CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (4).yml b/fr/.gitbook/assets/swagger (4).yml new file mode 100644 index 00000000..3b37cc52 --- /dev/null +++ b/fr/.gitbook/assets/swagger (4).yml @@ -0,0 +1,2470 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type. Only applicable if feedback is true + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + description: Filter by end date + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + - in: query + name: hardDelete + schema: + type: boolean + description: If hardDelete is true, messages will be deleted from the third party service as well + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: 
listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + 
/attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + base64: + type: boolean + default: false + description: Return contents of the files in base64 format + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + 
$ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: 
getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + 
description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/upsert/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Upsert document to document store + description: Upsert document to document store + operationId: upsertDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + loader: + type: string + nullable: true + example: 
'{"name":"plainText","config":{"text":"why the sky is blue"}}' + description: Loader configurations + splitter: + type: string + nullable: true + example: '{"name":"recursiveCharacterTextSplitter","config":{"chunkSize":2000}}' + description: Splitter configurations + embedding: + type: string + nullable: true + example: '{"name":"openAIEmbeddings","config":{"modelName":"text-embedding-ada-002"}}' + description: Embedding configurations + vectorStore: + type: string + nullable: true + example: '{"name":"faiss"}' + description: Vector Store configurations + recordManager: + type: string + nullable: true + example: '{"name":"postgresRecordManager"}' + description: Record Manager configurations + required: + - files + required: true + responses: + '200': + description: Successfully execute upsert operation + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/refresh/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Re-process and upsert all documents in document store + description: Re-process and upsert all existing documents in document store + operationId: refreshDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForRefresh' + required: true + responses: + '200': + description: Successfully execute refresh operation + content: + application/json: + type: array + items: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + 
operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + properties: + storeId: + type: string + description: Document Store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' + required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/{storeId}/{loaderId}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete specific document loader and associated chunks from document store + description: Delete specific document loader and associated chunks from document store. This does not delete data from vector store. 
+ operationId: deleteLoaderFromDocumentStore + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + responses: + '200': + description: Successfully deleted loader from document store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{pageNo}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get chunks from a specific document loader + description: Get chunks from a specific document loader within a document store + operationId: getDocumentStoreFileChunks + parameters: + - in: path + name: storeId + required: true + schema: + type: string + format: uuid + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + format: uuid + description: Document loader ID + - in: path + name: pageNo + required: true + schema: + type: string + description: Pagination number + responses: + '200': + description: Successfully retrieved chunks from document loader + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document 
store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{chunkId}: + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific chunk + description: Updates a specific chunk from a document loader + operationId: editDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Document' + required: true + responses: + '200': + description: Successfully updated chunk + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server error + + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific chunk from a document loader + description: Delete a specific chunk from a document loader + operationId: deleteDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + responses: + '200': + description: Successfully deleted chunk + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new 
feedback for a specific chat flow. + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + 
application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + 
required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: 
true + responses: + '200': + description: Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + 
parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - 
bearerAuth: [] + operationId: createVariable + summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': 
+ description: Variable not found + '500': + description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: 
'525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, 
MULTIAGENT] + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 
'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help 
you?' + uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + 
nullable: true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + 
type: string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store 
+ storeId: + type: string + format: uuid + description: Document Store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: 
+ type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + DocumentStoreLoaderForUpsert: + type: object + properties: + docId: + type: string + format: uuid + description: Document ID within the store. If provided, existing configuration from the document will be used for the new document + metadata: + type: object + description: Metadata associated with the document + example: { 'foo': 'bar' } + replaceExisting: + type: boolean + description: Whether to replace existing document loader with the new upserted chunks. 
However this does not delete the existing embeddings in the vector store + loader: + type: object + properties: + name: + type: string + example: plainText + description: Name of the loader (camelCase) + config: + type: object + description: Configuration for the loader + splitter: + type: object + properties: + name: + type: string + example: recursiveCharacterTextSplitter + description: Name of the text splitter (camelCase) + config: + type: object + description: Configuration for the text splitter + embedding: + type: object + properties: + name: + type: string + example: openAIEmbeddings + description: Name of the embedding generator (camelCase) + config: + type: object + description: Configuration for the embedding generator + vectorStore: + type: object + properties: + name: + type: string + example: faiss + description: Name of the vector store (camelCase) + config: + type: object + description: Configuration for the vector store + recordManager: + type: object + properties: + name: + type: string + example: postgresRecordManager + description: Name of the record manager (camelCase) + config: + type: object + description: Configuration for the record manager + + DocumentStoreLoaderForRefresh: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + 
CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger (5).yml b/fr/.gitbook/assets/swagger (5).yml new file mode 100644 index 00000000..9cca7597 --- /dev/null +++ b/fr/.gitbook/assets/swagger (5).yml @@ -0,0 +1,2526 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type. Only applicable if feedback is true + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + description: Filter by end date + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + - in: query + name: hardDelete + schema: + type: boolean + description: If hardDelete is true, messages will be deleted from the third party service as well + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: 
listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + 
/attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + base64: + type: boolean + default: false + description: Return contents of the files in base64 format + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + 
$ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: 
getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + 
description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/upsert/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Upsert document to document store + description: Upsert document to document store + operationId: upsertDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + docId: + type: string + nullable: true + example: 
'603a7b51-ae7c-4b0a-8865-e454ed2f6766' + description: Document ID to use existing configuration + loader: + type: string + nullable: true + example: '{"name":"plainText","config":{"text":"why the sky is blue"}}' + description: Loader configurations + splitter: + type: string + nullable: true + example: '{"name":"recursiveCharacterTextSplitter","config":{"chunkSize":2000}}' + description: Splitter configurations + embedding: + type: string + nullable: true + example: '{"name":"openAIEmbeddings","config":{"modelName":"text-embedding-ada-002"}}' + description: Embedding configurations + vectorStore: + type: string + nullable: true + example: '{"name":"faiss"}' + description: Vector Store configurations + recordManager: + type: string + nullable: true + example: '{"name":"postgresRecordManager"}' + description: Record Manager configurations + metadata: + type: object + nullable: true + description: Metadata associated with the document + example: { 'foo': 'bar' } + replaceExisting: + type: boolean + nullable: true + description: Whether to replace existing document loader with the new upserted chunks. 
However this does not delete the existing embeddings in the vector store + createNewDocStore: + type: boolean + nullable: true + description: Whether to create a new document store + docStore: + type: object + nullable: true + description: Only when createNewDocStore is true, pass in the new document store configuration + properties: + name: + type: string + example: plainText + description: Name of the new document store to be created + description: + type: string + example: plainText + description: Description of the new document store to be created + required: + - files + required: true + responses: + '200': + description: Successfully execute upsert operation + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/refresh/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Re-process and upsert all documents in document store + description: Re-process and upsert all existing documents in document store + operationId: refreshDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForRefresh' + required: true + responses: + '200': + description: Successfully execute refresh operation + content: + application/json: + type: array + items: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + 
properties: + storeId: + type: string + description: Document Store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' + required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/{storeId}/{loaderId}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete specific document loader and associated chunks from document store + description: Delete specific document loader and associated chunks from document store. This does not delete data from vector store. + operationId: deleteLoaderFromDocumentStore + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + responses: + '200': + description: Successfully deleted loader from document store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID 
provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{pageNo}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get chunks from a specific document loader + description: Get chunks from a specific document loader within a document store + operationId: getDocumentStoreFileChunks + parameters: + - in: path + name: storeId + required: true + schema: + type: string + format: uuid + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + format: uuid + description: Document loader ID + - in: path + name: pageNo + required: true + schema: + type: string + description: Pagination number + responses: + '200': + description: Successfully retrieved chunks from document loader + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{chunkId}: + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific chunk + description: Updates a specific chunk from a document loader + operationId: editDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Document' + required: true + responses: + '200': + description: Successfully updated chunk + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + 
description: Internal server error + + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific chunk from a document loader + description: Delete a specific chunk from a document loader + operationId: deleteDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + responses: + '200': + description: Successfully deleted chunk + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: 
true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: 
string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + 
storeId: + type: string + format: uuid + description: Document Store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + 
type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + DocumentStoreLoaderForUpsert: + type: object + properties: + docId: + type: string + format: uuid + nullable: true + description: Document ID within the store. If provided, existing configuration from the document will be used for the new document + metadata: + type: object + nullable: true + description: Metadata associated with the document + example: { 'foo': 'bar' } + replaceExisting: + type: boolean + nullable: true + description: Whether to replace existing document loader with the new upserted chunks. 
However this does not delete the existing embeddings in the vector store + createNewDocStore: + type: boolean + nullable: true + description: Whether to create a new document store + docStore: + type: object + nullable: true + description: Only when createNewDocStore is true, pass in the new document store configuration + properties: + name: + type: string + example: plainText + description: Name of the new document store to be created + description: + type: string + example: plainText + description: Description of the new document store to be created + loader: + type: object + nullable: true + properties: + name: + type: string + example: plainText + description: Name of the loader (camelCase) + config: + type: object + description: Configuration for the loader + splitter: + type: object + nullable: true + properties: + name: + type: string + example: recursiveCharacterTextSplitter + description: Name of the text splitter (camelCase) + config: + type: object + description: Configuration for the text splitter + embedding: + type: object + nullable: true + properties: + name: + type: string + example: openAIEmbeddings + description: Name of the embedding generator (camelCase) + config: + type: object + description: Configuration for the embedding generator + vectorStore: + type: object + nullable: true + properties: + name: + type: string + example: faiss + description: Name of the vector store (camelCase) + config: + type: object + description: Configuration for the vector store + recordManager: + type: object + nullable: true + properties: + name: + type: string + example: postgresRecordManager + description: Name of the record manager (camelCase) + config: + type: object + description: Configuration for the record manager + + DocumentStoreLoaderForRefresh: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid 
+ description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/swagger.yml b/fr/.gitbook/assets/swagger.yml new file mode 100644 index 00000000..9cca7597 --- /dev/null +++ b/fr/.gitbook/assets/swagger.yml @@ -0,0 +1,2526 @@ +tags: + - name: assistants + - name: attachments + - name: chatmessage + - name: chatflows + - name: document-store + - name: feedback + - name: leads + - name: ping + - name: prediction + - name: tools + - name: upsert-history + - name: variables + - name: vector + +paths: + /chatmessage/{id}: + get: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: getAllChatMessages + summary: List all chat messages + description: Retrieve all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: order + schema: + type: string + enum: [ASC, DESC] + description: Sort order + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + format: date-time + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + format: date-time + description: Filter by end date + - in: query + name: feedback + schema: + type: boolean + description: Filter by feedback + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type. Only applicable if feedback is true + responses: + '200': + description: A list of chat messages + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessage' + '500': + description: Internal error + + delete: + tags: + - chatmessage + security: + - bearerAuth: [] + operationId: removeAllChatMessages + summary: Delete all chat messages + description: Delete all chat messages for a specific chatflow. 
+ parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Filter by chat ID + - in: query + name: memoryType + schema: + type: string + example: Buffer Memory + description: Filter by memory type + - in: query + name: sessionId + schema: + type: string + description: Filter by session ID + - in: query + name: chatType + schema: + type: string + enum: [INTERNAL, EXTERNAL] + description: Filter by chat type + - in: query + name: startDate + schema: + type: string + example: 2025-01-01T11:28:36.000Z + description: Filter by start date + - in: query + name: endDate + schema: + type: string + example: 2025-01-13T11:28:36.000Z + description: Filter by end date + - in: query + name: feedbackType + schema: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Filter by feedback type + - in: query + name: hardDelete + schema: + type: boolean + description: If hardDelete is true, messages will be deleted from the third party service as well + responses: + '200': + description: Chat messages deleted successfully + '400': + description: Invalid parameters + '404': + description: Chat messages not found + '500': + description: Internal error + /assistants: + post: + tags: + - assistants + security: + - bearerAuth: [] + operationId: createAssistant + summary: Create a new assistant + description: Create a new assistant with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + required: true + responses: + '200': + description: Assistant created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: List all assistants + description: Retrieve a list of all assistants + operationId: 
listAssistants + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Assistant' + '500': + description: Internal error + /assistants/{id}: + get: + tags: + - assistants + security: + - bearerAuth: [] + summary: Get assistant by ID + description: Retrieve a specific assistant by ID + operationId: getAssistantById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + put: + tags: + - assistants + security: + - bearerAuth: [] + summary: Update assistant details + description: Update the details of an existing assistant + operationId: updateAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + responses: + '200': + description: Assistant updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Assistant' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Assistant not found + '500': + description: Internal error + delete: + tags: + - assistants + security: + - bearerAuth: [] + summary: Delete an assistant + description: Delete an assistant by ID + operationId: deleteAssistant + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Assistant ID + responses: + '200': + description: Assistant deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Assistant not found + '500': + description: Internal error + + 
/attachments/{chatflowId}/{chatId}: + post: + tags: + - attachments + security: + - bearerAuth: [] + operationId: createAttachment + summary: Create attachments array + description: Return contents of the files in plain string format + parameters: + - in: path + name: chatflowId + required: true + schema: + type: string + description: Chatflow ID + - in: path + name: chatId + required: true + schema: + type: string + description: Chat ID + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + base64: + type: boolean + default: false + description: Return contents of the files in base64 format + required: + - files + required: true + responses: + '200': + description: Attachments created successfully + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/CreateAttachmentResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow or ChatId not found + '422': + description: Validation error + '500': + description: Internal server error + + /chatflows: + post: + tags: + - chatflows + security: + - bearerAuth: [] + operationId: createChatflow + summary: Create a new chatflow + description: Create a new chatflow with the provided details + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + required: true + responses: + '200': + description: Chatflow created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: Invalid input provided + '422': + description: Validation exception + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: List all chatflows + description: Retrieve a list of all chatflows + operationId: listChatflows + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + 
$ref: '#/components/schemas/Chatflow' + '500': + description: Internal error + /chatflows/{id}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by ID + description: Retrieve a specific chatflow by ID + operationId: getChatflowById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + put: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Update chatflow details + description: Update the details of an existing chatflow + operationId: updateChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + responses: + '200': + description: Chatflow updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified ID is invalid or body is missing + '404': + description: Chatflow not found + '500': + description: Internal error + delete: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Delete a chatflow + description: Delete a chatflow by ID + operationId: deleteChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Chatflow deleted successfully + '400': + description: The specified ID is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + /chatflows/apikey/{apikey}: + get: + tags: + - chatflows + security: + - bearerAuth: [] + summary: Get chatflow by API key + description: Retrieve a chatflow using an API key + operationId: 
getChatflowByApiKey + parameters: + - in: path + name: apikey + required: true + schema: + type: string + description: API key associated with the chatflow + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Chatflow' + '400': + description: The specified API key is invalid + '404': + description: Chatflow not found + '500': + description: Internal error + + /document-store/store: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Create a new document store + description: Creates a new document store with the provided details + operationId: createDocumentStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully created document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '400': + description: Invalid request body + '500': + description: Internal server error + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: List all document stores + description: Retrieves a list of all document stores + operationId: getAllDocumentStores + responses: + '200': + description: A list of document stores + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DocumentStore' + '500': + description: Internal server error + + /document-store/store/{id}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get a specific document store + description: Retrieves details of a specific document store by its ID + operationId: getDocumentStoreById + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully retrieved document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + 
description: Document store not found + '500': + description: Internal server error + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific document store + description: Updates the details of a specific document store by its ID + operationId: updateDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + required: true + responses: + '200': + description: Successfully updated document store + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStore' + '404': + description: Document store not found + '500': + description: Internal server error + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific document store + description: Deletes a document store by its ID + operationId: deleteDocumentStore + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + responses: + '200': + description: Successfully deleted document store + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/upsert/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Upsert document to document store + description: Upsert document to document store + operationId: upsertDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + docId: + type: string + nullable: true + example: 
'603a7b51-ae7c-4b0a-8865-e454ed2f6766' + description: Document ID to use existing configuration + loader: + type: string + nullable: true + example: '{"name":"plainText","config":{"text":"why the sky is blue"}}' + description: Loader configurations + splitter: + type: string + nullable: true + example: '{"name":"recursiveCharacterTextSplitter","config":{"chunkSize":2000}}' + description: Splitter configurations + embedding: + type: string + nullable: true + example: '{"name":"openAIEmbeddings","config":{"modelName":"text-embedding-ada-002"}}' + description: Embedding configurations + vectorStore: + type: string + nullable: true + example: '{"name":"faiss"}' + description: Vector Store configurations + recordManager: + type: string + nullable: true + example: '{"name":"postgresRecordManager"}' + description: Record Manager configurations + metadata: + type: object + nullable: true + description: Metadata associated with the document + example: { 'foo': 'bar' } + replaceExisting: + type: boolean + nullable: true + description: Whether to replace existing document loader with the new upserted chunks. 
However this does not delete the existing embeddings in the vector store + createNewDocStore: + type: boolean + nullable: true + description: Whether to create a new document store + docStore: + type: object + nullable: true + description: Only when createNewDocStore is true, pass in the new document store configuration + properties: + name: + type: string + example: plainText + description: Name of the new document store to be created + description: + type: string + example: plainText + description: Description of the new document store to be created + required: + - files + required: true + responses: + '200': + description: Successfully execute upsert operation + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/refresh/{id}: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Re-process and upsert all documents in document store + description: Re-process and upsert all existing documents in document store + operationId: refreshDocument + parameters: + - in: path + name: id + required: true + schema: + type: string + format: uuid + description: Document Store ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreLoaderForRefresh' + required: true + responses: + '200': + description: Successfully execute refresh operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/VectorUpsertResponse' + + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/vectorstore/query: + post: + tags: + - document-store + security: + - bearerAuth: [] + summary: Retrieval query + description: Retrieval query for the upserted chunks + operationId: queryVectorStore + requestBody: + content: + application/json: + schema: + type: object + required: + - storeId + - query + 
properties: + storeId: + type: string + description: Document Store ID + example: '603a7b51-ae7c-4b0a-8865-e454ed2f6766' + query: + type: string + description: Query to search for + example: 'What is the capital of France?' + required: true + responses: + '200': + description: Successfully executed query on vector store + content: + application/json: + schema: + type: object + properties: + timeTaken: + type: number + description: Time taken to execute the query (in milliseconds) + docs: + type: array + items: + $ref: '#/components/schemas/Document' + '400': + description: Invalid request body + '500': + description: Internal server error + + /document-store/loader/{storeId}/{loaderId}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete specific document loader and associated chunks from document store + description: Delete specific document loader and associated chunks from document store. This does not delete data from vector store. + operationId: deleteLoaderFromDocumentStore + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + responses: + '200': + description: Successfully deleted loader from document store + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/vectorstore/{id}: + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete data from vector store + description: Only data that were upserted with Record Manager will be deleted from vector store + operationId: deleteVectorStoreFromStore + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Document Store ID + responses: + '200': + description: Successfully deleted data from vector store + '400': + description: Invalid ID 
provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{pageNo}: + get: + tags: + - document-store + security: + - bearerAuth: [] + summary: Get chunks from a specific document loader + description: Get chunks from a specific document loader within a document store + operationId: getDocumentStoreFileChunks + parameters: + - in: path + name: storeId + required: true + schema: + type: string + format: uuid + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + format: uuid + description: Document loader ID + - in: path + name: pageNo + required: true + schema: + type: string + description: Pagination number + responses: + '200': + description: Successfully retrieved chunks from document loader + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + description: Internal server error + + /document-store/chunks/{storeId}/{loaderId}/{chunkId}: + put: + tags: + - document-store + security: + - bearerAuth: [] + summary: Update a specific chunk + description: Updates a specific chunk from a document loader + operationId: editDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Document' + required: true + responses: + '200': + description: Successfully updated chunk + content: + application/json: + schema: + $ref: '#/components/schemas/DocumentStoreFileChunkPagedResponse' + '404': + description: Document store not found + '500': + 
description: Internal server error + + delete: + tags: + - document-store + security: + - bearerAuth: [] + summary: Delete a specific chunk from a document loader + description: Delete a specific chunk from a document loader + operationId: deleteDocumentStoreFileChunk + parameters: + - in: path + name: storeId + required: true + schema: + type: string + description: Document Store ID + - in: path + name: loaderId + required: true + schema: + type: string + description: Document Loader ID + - in: path + name: chunkId + required: true + schema: + type: string + description: Document Chunk ID + responses: + '200': + description: Successfully deleted chunk + '400': + description: Invalid ID provided + '404': + description: Document Store not found + '500': + description: Internal server error + + /feedback: + post: + tags: + - feedback + security: + - bearerAuth: [] + operationId: createChatMessageFeedbackForChatflow + summary: Create new chat message feedback + description: Create new feedback for a specific chat flow. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + required: true + responses: + '200': + description: Feedback successfully created + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '500': + description: Internal server error + /feedback/{id}: + get: + tags: + - feedback + security: + - bearerAuth: [] + summary: List all chat message feedbacks for a chatflow + description: Retrieve all feedbacks for a chatflow + operationId: getAllChatMessageFeedback + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + - in: query + name: chatId + schema: + type: string + description: Chat ID to filter feedbacks (optional) + - in: query + name: sortOrder + schema: + type: string + enum: [asc, desc] + default: asc + description: Sort order of feedbacks (optional) + - in: query + name: startDate + schema: + type: string + format: date-time + description: Filter feedbacks starting from this date (optional) + - in: query + name: endDate + schema: + type: string + format: date-time + description: Filter feedbacks up to this date (optional) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ChatMessageFeedback' + '500': + description: Internal server error + put: + tags: + - feedback + security: + - bearerAuth: [] + summary: Update chat message feedback + description: Update a specific feedback + operationId: updateChatMessageFeedbackForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chat Message Feedback ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatMessageFeedback' + responses: + '200': + description: Feedback successfully updated + content: + application/json: + schema: + $ref: 
'#/components/schemas/ChatMessageFeedback' + '400': + description: Invalid input provided + '404': + description: Feedback with the specified ID was not found + '500': + description: Internal server error + + /leads: + post: + tags: + - leads + security: + - bearerAuth: [] + operationId: createLead + summary: Create a new lead in a chatflow + description: Create a new lead associated with a specific chatflow + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + required: true + responses: + '200': + description: Lead created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + + /leads/{id}: + get: + tags: + - leads + security: + - bearerAuth: [] + summary: Get all leads for a specific chatflow + description: Retrieve all leads associated with a specific chatflow + operationId: getAllLeadsForChatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Lead' + '400': + description: Invalid ID provided + '404': + description: Leads not found + '500': + description: Internal server error + + /ping: + get: + tags: + - ping + summary: Ping the server + description: Ping the server to check if it is running + operationId: pingServer + responses: + '200': + description: Server is running + content: + text/plain: + schema: + type: string + example: pong + '500': + description: Internal server error + + /prediction/{id}: + post: + tags: + - prediction + security: + - bearerAuth: [] + operationId: createPrediction + summary: Create a new prediction + description: Create a new prediction + parameters: + - in: path + name: id + required: true + schema: + type: string 
+ description: Chatflow ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Prediction' + multipart/form-data: + schema: + type: object + properties: + question: + type: string + description: Question to ask during the prediction process + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - question + required: true + responses: + '200': + description: Prediction created successfully + content: + application/json: + schema: + type: object + properties: + text: + type: string + description: The result of the prediction + json: + type: object + description: The result of the prediction in JSON format if available + question: + type: string + description: The question asked during the prediction process + chatId: + type: string + description: The chat ID associated with the prediction + chatMessageId: + type: string + description: The chat message ID associated with the prediction + sessionId: + type: string + description: The session ID associated with the prediction + memoryType: + type: string + description: The memory type associated with the prediction + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + items: + $ref: '#/components/schemas/FileAnnotation' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + /tools: + post: + tags: + - tools + security: + - bearerAuth: [] + operationId: createTool + summary: Create a new tool + description: Create a new tool + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: 
Tool created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - tools + security: + - bearerAuth: [] + summary: List all tools + description: Retrieve a list of all tools + operationId: getAllTools + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Tool' + '500': + description: Internal server error + + /tools/{id}: + get: + tags: + - tools + security: + - bearerAuth: [] + summary: Get a tool by ID + description: Retrieve a specific tool by ID + operationId: getToolById + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + put: + tags: + - tools + security: + - bearerAuth: [] + summary: Update a tool by ID + description: Update a specific tool by ID + operationId: updateTool + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Tool ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + required: true + responses: + '200': + description: Tool updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + description: Invalid ID or request body provided + '404': + description: Tool not found + '500': + description: Internal server error + delete: + tags: + - tools + security: + - bearerAuth: [] + summary: Delete a tool by ID + description: Delete a specific tool by ID + operationId: deleteTool + parameters: + - in: path + name: id + required: 
true + schema: + type: string + description: Tool ID + responses: + '200': + description: Tool deleted successfully + '400': + description: Invalid ID provided + '404': + description: Tool not found + '500': + description: Internal server error + + /upsert-history/{id}: + get: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Get all upsert history records + description: Retrieve all upsert history records with optional filters + operationId: getAllUpsertHistory + parameters: + - in: path + name: id + required: false + schema: + type: string + description: Chatflow ID to filter records by + - in: query + name: order + required: false + schema: + type: string + enum: [ASC, DESC] + default: ASC + description: Sort order of the results (ascending or descending) + - in: query + name: startDate + required: false + schema: + type: string + format: date-time + description: Filter records from this start date (inclusive) + - in: query + name: endDate + required: false + schema: + type: string + format: date-time + description: Filter records until this end date (inclusive) + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UpsertHistoryResponse' + '500': + description: Internal server error + patch: + tags: + - upsert-history + security: + - bearerAuth: [] + summary: Delete upsert history records + description: Soft delete upsert history records by IDs + operationId: patchDeleteUpsertHistory + requestBody: + content: + application/json: + schema: + type: object + properties: + ids: + type: array + items: + type: string + format: uuid + description: List of upsert history record IDs to delete + responses: + '200': + description: Successfully deleted records + '400': + description: Invalid request body + '500': + description: Internal server error + /variables: + post: + tags: + - variables + security: + - bearerAuth: [] + operationId: createVariable + 
summary: Create a new variable + description: Create a new variable + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid request body + '422': + description: Validation error + '500': + description: Internal server error + get: + tags: + - variables + security: + - bearerAuth: [] + summary: List all variables + description: Retrieve a list of all variables + operationId: getAllVariables + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Variable' + '500': + description: Internal server error + + /variables/{id}: + put: + tags: + - variables + security: + - bearerAuth: [] + summary: Update a variable by ID + description: Update a specific variable by ID + operationId: updateVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + required: true + responses: + '200': + description: Variable updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Variable' + '400': + description: Invalid ID or request body provided + '404': + description: Variable not found + '500': + description: Internal server error + delete: + tags: + - variables + security: + - bearerAuth: [] + summary: Delete a variable by ID + description: Delete a specific variable by ID + operationId: deleteVariable + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Variable ID + responses: + '200': + description: Variable deleted successfully + '400': + description: Invalid ID provided + '404': + description: Variable not found + '500': + 
description: Internal server error + /vector/upsert/{id}: + post: + tags: + - vector + security: + - bearerAuth: [] + operationId: vectorUpsert + summary: Upsert vector embeddings + description: Upsert vector embeddings of documents in a chatflow + parameters: + - in: path + name: id + required: true + schema: + type: string + description: Chatflow ID + requestBody: + content: + application/json: + schema: + type: object + properties: + stopNodeId: + type: string + description: In cases when you have multiple vector store nodes, you can specify the node ID to store the vectors + example: 'node_1' + overrideConfig: + type: object + description: The configuration to override the default vector upsert settings (optional) + multipart/form-data: + schema: + type: object + properties: + files: + type: array + items: + type: string + format: binary + description: Files to be uploaded + modelName: + type: string + nullable: true + example: '' + description: Other override configurations + required: + - files + required: true + responses: + '200': + description: Vector embeddings upserted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/VectorUpsertResponse' + '400': + description: Invalid input provided + '404': + description: Chatflow not found + '422': + description: Validation error + '500': + description: Internal server error + +components: + responses: + UnauthorizedError: + description: Access token is missing or invalid + schemas: + ApiKey: + type: object + properties: + apiKey: + type: string + example: 'vYV8OdUMRzRQbzpp2JzY5DvriBnuVHo3pYpPQ7IJWyw=' + apiSecret: + type: string + example: '50e19a35ee1df775c09628dade1c00f0f680c6e15256e34a6eab350b38b31352df35c4db7925a3e5dd41cc773a0e2529e6c6da18408a8bbeeb0ae4b0f0ab9486.a96478a9225ed6ab' + chatFlows: + type: array + example: [] + createdAt: + type: string + example: '10-Mar-24' + id: + type: string + example: '525e4daa2104f06ffdea5c1af37009be' + keyName: + type: string + example: 
'someKeyName' + + ChatMessage: + type: object + properties: + id: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + role: + type: string + enum: [apiMessage, userMessage] + example: 'apiMessage' + chatflowid: + type: string + format: uuid + example: 'd290f1ee-6c54-4b01-90e6-d701748f0852' + content: + type: string + example: 'Hello, how can I help you today?' + sourceDocuments: + type: array + nullable: true + items: + $ref: '#/components/schemas/Document' + usedTools: + type: array + nullable: true + items: + $ref: '#/components/schemas/UsedTool' + fileAnnotations: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileAnnotation' + agentReasoning: + type: array + nullable: true + items: + $ref: '#/components/schemas/AgentReasoning' + fileUploads: + type: array + nullable: true + items: + $ref: '#/components/schemas/FileUpload' + action: + type: array + nullable: true + items: + $ref: '#/components/schemas/Action' + chatType: + type: string + enum: [INTERNAL, EXTERNAL] + example: 'INTERNAL' + chatId: + type: string + example: 'chat12345' + memoryType: + type: string + nullable: true + sessionId: + type: string + nullable: true + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + leadEmail: + type: string + nullable: true + example: 'user@example.com' + + Chatflow: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + name: + type: string + example: 'MyChatFlow' + flowData: + type: string + example: '{}' + deployed: + type: boolean + isPublic: + type: boolean + apikeyid: + type: string + chatbotConfig: + type: string + example: '{}' + apiConfig: + type: string + example: '{}' + analytic: + type: string + example: '{}' + speechToText: + type: string + example: '{}' + category: + type: string + example: 'category1;category2' + type: + type: string + enum: [CHATFLOW, MULTIAGENT] + createdDate: + type: string + format: date-time + example: 
'2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Document: + type: object + properties: + pageContent: + type: string + example: 'This is the content of the page.' + metadata: + type: object + additionalProperties: + type: string + example: + author: 'John Doe' + date: '2024-08-24' + + UsedTool: + type: object + properties: + tool: + type: string + example: 'Name of the tool' + toolInput: + type: object + additionalProperties: + type: string + example: + input: 'search query' + toolOutput: + type: string + + FileAnnotation: + type: object + properties: + filePath: + type: string + example: 'path/to/file' + fileName: + type: string + example: 'file.txt' + + FileUpload: + type: object + properties: + data: + type: string + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + type: + type: string + example: 'image' + name: + type: string + example: 'image.png' + mime: + type: string + example: 'image/png' + Action: + type: object + properties: + id: + type: string + format: uuid + example: '61beeb58-6ebe-4d51-aa0b-41d4c546ff08' + mapping: + type: object + properties: + approve: + type: string + example: 'Yes' + reject: + type: string + example: 'No' + toolCalls: + type: array + example: [] + elements: + type: array + + AgentReasoning: + type: object + properties: + agentName: + type: string + example: 'agent' + messages: + type: array + items: + type: string + example: ['hello'] + nodeName: + type: string + example: 'seqAgent' + nodeId: + type: string + example: 'seqAgent_0' + usedTools: + type: array + items: + $ref: '#/components/schemas/UsedTool' + sourceDocuments: + type: array + items: + $ref: '#/components/schemas/Document' + state: + type: object + additionalProperties: + type: string + + Assistant: + type: object + properties: + id: + type: string + example: 'd290f1ee-6c54-4b01-90e6-d701748f0851' + details: + type: object + properties: + id: + type: 
string + example: 'asst_zbNeYIuXIUSKVHjJkfRo6ilv' + name: + type: string + example: 'assistant' + description: + type: string + model: + type: string + example: 'gpt-4' + instructions: + type: string + example: 'You are a helpful assistant, do your best to answer question and query' + temperature: + type: number + example: 1 + top_p: + type: number + example: 1 + tools: + type: array + items: + type: string + example: ['function', 'code_interpreter', 'file_search'] + tool_resources: + type: object + additionalProperties: + type: object + credential: + type: string + example: '7db93c02-8d5a-4117-a8f1-3dfb6721b339' + iconSrc: + type: string + example: '/images/assistant.png' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + + Credential: + type: object + properties: + id: + type: string + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + example: 'My Credential' + credentialName: + type: string + example: 'openAIAPI' + encryptedData: + type: string + example: 'U2FsdGVkX1/3T2gnnsEtX6FJi1DbnYx0VVdS3XWZ5ro=' + createdDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + example: '2024-08-24T14:15:22Z' + Prediction: + type: object + properties: + question: + type: string + description: The question being asked + overrideConfig: + type: object + description: The configuration to override the default prediction settings (optional) + history: + type: array + description: The history messages to be prepended (optional) + items: + type: object + properties: + role: + type: string + enum: [apiMessage, userMessage] + description: The role of the message + example: apiMessage + content: + type: string + description: The content of the message + example: 'Hello, how can I help you?' 
+ uploads: + type: array + items: + type: object + properties: + type: + type: string + enum: [audio, url, file, file:rag, file:full] + description: The type of file upload + example: file + name: + type: string + description: The name of the file or resource + example: 'image.png' + data: + type: string + description: The base64-encoded data or URL for the resource + example: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG' + mime: + type: string + description: The MIME type of the file or resource + example: 'image/png' + + Tool: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the tool + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the tool + example: 'date_time_tool' + description: + type: string + description: Description of the tool + example: 'A tool used for date and time operations' + color: + type: string + description: Color associated with the tool + example: '#FF5733' + iconSrc: + type: string + nullable: true + description: Source URL for the tool's icon + example: 'https://example.com/icons/date.png' + schema: + type: string + nullable: true + description: JSON schema associated with the tool + func: + type: string + nullable: true + description: Functionality description or code associated with the tool + createdDate: + type: string + format: date-time + description: Date and time when the tool was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the tool was last updated + example: '2024-08-24T14:15:22Z' + Variable: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the variable + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the variable + example: 'API_KEY' + value: + type: string + description: Value of the variable + nullable: 
true + example: 'my-secret-key' + type: + type: string + description: Type of the variable (e.g., string, number) + example: 'string' + createdDate: + type: string + format: date-time + description: Date and time when the variable was created + example: '2024-08-24T14:15:22Z' + updatedDate: + type: string + format: date-time + description: Date and time when the variable was last updated + example: '2024-08-24T14:15:22Z' + VectorUpsertResponse: + type: object + properties: + numAdded: + type: number + description: Number of vectors added + example: 1 + numDeleted: + type: number + description: Number of vectors deleted + example: 1 + numUpdated: + type: number + description: Number of vectors updated + example: 1 + numSkipped: + type: number + description: Number of vectors skipped (not added, deleted, or updated) + example: 1 + addedDocs: + type: array + items: + $ref: '#/components/schemas/Document' + Lead: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the lead + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + name: + type: string + description: Name of the lead + example: 'John Doe' + email: + type: string + description: Email address of the lead + example: 'john.doe@example.com' + phone: + type: string + description: Phone number of the lead + example: '+1234567890' + chatflowid: + type: string + description: ID of the chatflow the lead is associated with + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + chatId: + type: string + description: ID of the chat session the lead is associated with + example: 'd7b0b5d8-85e6-4f2a-9c1f-9d9a0e2ebf6b' + createdDate: + type: string + format: date-time + description: Date and time when the lead was created + example: '2024-08-24T14:15:22Z' + UpsertHistoryResponse: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the upsert history record + example: 'cfd531e0-82fc-11e9-bc42-526af7764f64' + chatflowid: + type: 
string + description: ID of the chatflow associated with the upsert history + example: '7c4e8b7a-7b9a-4b4d-9f3e-2d28f1ebea02' + result: + type: string + description: Result of the upsert operation, stored as a JSON string + example: '{"status":"success","data":{"key":"value"}}' + flowData: + type: string + description: Flow data associated with the upsert operation, stored as a JSON string + example: '{"nodes":[],"edges":[]}' + date: + type: string + format: date-time + description: Date and time when the upsert operation was performed + example: '2024-08-24T14:15:22Z' + DocumentStore: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store + name: + type: string + description: Name of the document store + description: + type: string + description: Description of the document store + loaders: + type: string + description: Loaders associated with the document store, stored as JSON string + whereUsed: + type: string + description: Places where the document store is used, stored as JSON string + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store + vectorStoreConfig: + type: string + description: Configuration for the vector store, stored as JSON string + embeddingConfig: + type: string + description: Configuration for the embedding, stored as JSON string + recordManagerConfig: + type: string + description: Configuration for the record manager, stored as JSON string + createdDate: + type: string + format: date-time + description: Date and time when the document store was created + updatedDate: + type: string + format: date-time + description: Date and time when the document store was last updated + + DocumentStoreFileChunk: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file chunk + docId: + type: string + format: uuid + description: Document ID within the store + 
storeId: + type: string + format: uuid + description: Document Store ID + chunkNo: + type: integer + description: Chunk number within the document + pageContent: + type: string + description: Content of the chunk + metadata: + type: string + description: Metadata associated with the chunk + + DocumentStoreLoaderForPreview: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the document store loader + loaderId: + type: string + description: ID of the loader + loaderName: + type: string + description: Name of the loader + loaderConfig: + type: object + description: Configuration for the loader + splitterId: + type: string + description: ID of the text splitter + splitterName: + type: string + description: Name of the text splitter + splitterConfig: + type: object + description: Configuration for the text splitter + totalChunks: + type: number + description: Total number of chunks + totalChars: + type: number + description: Total number of characters + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the document store loader + storeId: + type: string + description: ID of the document store + files: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderFile' + source: + type: string + description: Source of the document store loader + credential: + type: string + description: Credential associated with the document store loader + rehydrated: + type: boolean + description: Whether the loader has been rehydrated + preview: + type: boolean + description: Whether the loader is in preview mode + previewChunkCount: + type: number + description: Number of chunks in preview mode + + DocumentStoreLoaderFile: + type: object + properties: + id: + type: string + format: uuid + description: Unique identifier for the file + name: + type: string + description: Name of the file + mimePrefix: + type: string + description: MIME prefix of the file + size: + 
type: number + description: Size of the file + status: + type: string + enum: [EMPTY, SYNC, SYNCING, STALE, NEW, UPSERTING, UPSERTED] + description: Status of the file + uploaded: + type: string + format: date-time + description: Date and time when the file was uploaded + + DocumentStoreFileChunkPagedResponse: + type: object + properties: + chunks: + type: array + items: + $ref: '#/components/schemas/DocumentStoreFileChunk' + count: + type: number + example: 1 + file: + $ref: '#/components/schemas/DocumentStoreLoaderForPreview' + currentPage: + type: number + storeName: + type: string + description: + type: string + + DocumentStoreLoaderForUpsert: + type: object + properties: + docId: + type: string + format: uuid + nullable: true + description: Document ID within the store. If provided, existing configuration from the document will be used for the new document + metadata: + type: object + nullable: true + description: Metadata associated with the document + example: { 'foo': 'bar' } + replaceExisting: + type: boolean + nullable: true + description: Whether to replace existing document loader with the new upserted chunks. 
However this does not delete the existing embeddings in the vector store + createNewDocStore: + type: boolean + nullable: true + description: Whether to create a new document store + docStore: + type: object + nullable: true + description: Only when createNewDocStore is true, pass in the new document store configuration + properties: + name: + type: string + example: plainText + description: Name of the new document store to be created + description: + type: string + example: plainText + description: Description of the new document store to be created + loader: + type: object + nullable: true + properties: + name: + type: string + example: plainText + description: Name of the loader (camelCase) + config: + type: object + description: Configuration for the loader + splitter: + type: object + nullable: true + properties: + name: + type: string + example: recursiveCharacterTextSplitter + description: Name of the text splitter (camelCase) + config: + type: object + description: Configuration for the text splitter + embedding: + type: object + nullable: true + properties: + name: + type: string + example: openAIEmbeddings + description: Name of the embedding generator (camelCase) + config: + type: object + description: Configuration for the embedding generator + vectorStore: + type: object + nullable: true + properties: + name: + type: string + example: faiss + description: Name of the vector store (camelCase) + config: + type: object + description: Configuration for the vector store + recordManager: + type: object + nullable: true + properties: + name: + type: string + example: postgresRecordManager + description: Name of the record manager (camelCase) + config: + type: object + description: Configuration for the record manager + + DocumentStoreLoaderForRefresh: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/DocumentStoreLoaderForUpsert' + + ChatMessageFeedback: + type: object + properties: + id: + type: string + format: uuid 
+ description: Unique identifier for the feedback + chatflowid: + type: string + format: uuid + description: Identifier for the chat flow + chatId: + type: string + description: Identifier for the chat + messageId: + type: string + format: uuid + description: Identifier for the message + rating: + type: string + enum: [THUMBS_UP, THUMBS_DOWN] + description: Rating for the message + content: + type: string + description: Feedback content + createdDate: + type: string + format: date-time + description: Date and time when the feedback was created + + CreateAttachmentResponse: + type: object + properties: + name: + type: string + description: Name of the file + mimeType: + type: string + description: Mime type of the file + size: + type: string + description: Size of the file + content: + type: string + description: Content of the file in string format + + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT # optional, for documentation purposes only diff --git a/fr/.gitbook/assets/tavily.png b/fr/.gitbook/assets/tavily.png new file mode 100644 index 00000000..8313e561 Binary files /dev/null and b/fr/.gitbook/assets/tavily.png differ diff --git a/fr/.gitbook/assets/tavily/tavily-1.png b/fr/.gitbook/assets/tavily/tavily-1.png new file mode 100644 index 00000000..4605fe2f Binary files /dev/null and b/fr/.gitbook/assets/tavily/tavily-1.png differ diff --git a/fr/.gitbook/assets/tavily/tavily-2.png b/fr/.gitbook/assets/tavily/tavily-2.png new file mode 100644 index 00000000..8ca8d1c5 Binary files /dev/null and b/fr/.gitbook/assets/tavily/tavily-2.png differ diff --git a/fr/.gitbook/assets/ud_01.png b/fr/.gitbook/assets/ud_01.png new file mode 100644 index 00000000..9163207f Binary files /dev/null and b/fr/.gitbook/assets/ud_01.png differ diff --git a/fr/.gitbook/assets/up-001.png b/fr/.gitbook/assets/up-001.png new file mode 100644 index 00000000..2a1c18ac Binary files /dev/null and b/fr/.gitbook/assets/up-001.png differ diff --git 
a/fr/.gitbook/assets/up-002.png b/fr/.gitbook/assets/up-002.png new file mode 100644 index 00000000..52f595ba Binary files /dev/null and b/fr/.gitbook/assets/up-002.png differ diff --git a/fr/.gitbook/assets/up-003.png b/fr/.gitbook/assets/up-003.png new file mode 100644 index 00000000..dc6ac0c2 Binary files /dev/null and b/fr/.gitbook/assets/up-003.png differ diff --git a/fr/.gitbook/assets/up-004.png b/fr/.gitbook/assets/up-004.png new file mode 100644 index 00000000..0ba04fee Binary files /dev/null and b/fr/.gitbook/assets/up-004.png differ diff --git a/fr/.gitbook/assets/up-005.png b/fr/.gitbook/assets/up-005.png new file mode 100644 index 00000000..50d73d69 Binary files /dev/null and b/fr/.gitbook/assets/up-005.png differ diff --git a/fr/.gitbook/assets/up-006.png b/fr/.gitbook/assets/up-006.png new file mode 100644 index 00000000..39de29bf Binary files /dev/null and b/fr/.gitbook/assets/up-006.png differ diff --git a/fr/.gitbook/assets/up-007.png b/fr/.gitbook/assets/up-007.png new file mode 100644 index 00000000..964accb7 Binary files /dev/null and b/fr/.gitbook/assets/up-007.png differ diff --git a/fr/.gitbook/assets/up-008.png b/fr/.gitbook/assets/up-008.png new file mode 100644 index 00000000..e72d6e58 Binary files /dev/null and b/fr/.gitbook/assets/up-008.png differ diff --git a/fr/.gitbook/assets/up-009.png b/fr/.gitbook/assets/up-009.png new file mode 100644 index 00000000..96473c30 Binary files /dev/null and b/fr/.gitbook/assets/up-009.png differ diff --git a/fr/.gitbook/assets/up-010.png b/fr/.gitbook/assets/up-010.png new file mode 100644 index 00000000..3cba7065 Binary files /dev/null and b/fr/.gitbook/assets/up-010.png differ diff --git a/fr/.gitbook/assets/up-011.png b/fr/.gitbook/assets/up-011.png new file mode 100644 index 00000000..acafb033 Binary files /dev/null and b/fr/.gitbook/assets/up-011.png differ diff --git a/fr/.gitbook/assets/up-012.png b/fr/.gitbook/assets/up-012.png new file mode 100644 index 00000000..d9a44c88 Binary files 
/dev/null and b/fr/.gitbook/assets/up-012.png differ diff --git a/fr/.gitbook/assets/up-013.png b/fr/.gitbook/assets/up-013.png new file mode 100644 index 00000000..6ba61861 Binary files /dev/null and b/fr/.gitbook/assets/up-013.png differ diff --git a/fr/.gitbook/assets/up-014.png b/fr/.gitbook/assets/up-014.png new file mode 100644 index 00000000..e43fe443 Binary files /dev/null and b/fr/.gitbook/assets/up-014.png differ diff --git a/fr/.gitbook/assets/up-015.png b/fr/.gitbook/assets/up-015.png new file mode 100644 index 00000000..751d6921 Binary files /dev/null and b/fr/.gitbook/assets/up-015.png differ diff --git a/fr/.gitbook/assets/up-018.png b/fr/.gitbook/assets/up-018.png new file mode 100644 index 00000000..a079d708 Binary files /dev/null and b/fr/.gitbook/assets/up-018.png differ diff --git a/fr/.gitbook/assets/upstash/create-index.jpeg b/fr/.gitbook/assets/upstash/create-index.jpeg new file mode 100644 index 00000000..92743fd8 Binary files /dev/null and b/fr/.gitbook/assets/upstash/create-index.jpeg differ diff --git a/fr/.gitbook/assets/upstash/credentials.jpeg b/fr/.gitbook/assets/upstash/credentials.jpeg new file mode 100644 index 00000000..a4c8bfbf Binary files /dev/null and b/fr/.gitbook/assets/upstash/credentials.jpeg differ diff --git a/fr/.gitbook/assets/upstash/databrowser.jpeg b/fr/.gitbook/assets/upstash/databrowser.jpeg new file mode 100644 index 00000000..d91f5f63 Binary files /dev/null and b/fr/.gitbook/assets/upstash/databrowser.jpeg differ diff --git a/fr/.gitbook/assets/upstash/env-variables.jpeg b/fr/.gitbook/assets/upstash/env-variables.jpeg new file mode 100644 index 00000000..92743fd8 Binary files /dev/null and b/fr/.gitbook/assets/upstash/env-variables.jpeg differ diff --git a/fr/.gitbook/assets/upstash/flowise-design.jpeg b/fr/.gitbook/assets/upstash/flowise-design.jpeg new file mode 100644 index 00000000..5519d6b4 Binary files /dev/null and b/fr/.gitbook/assets/upstash/flowise-design.jpeg differ diff --git 
a/fr/.gitbook/assets/upstash/list-index.jpeg b/fr/.gitbook/assets/upstash/list-index.jpeg new file mode 100644 index 00000000..db5d7d91 Binary files /dev/null and b/fr/.gitbook/assets/upstash/list-index.jpeg differ diff --git a/fr/.gitbook/assets/upstash/upstash-node.jpeg b/fr/.gitbook/assets/upstash/upstash-node.jpeg new file mode 100644 index 00000000..3fa6df21 Binary files /dev/null and b/fr/.gitbook/assets/upstash/upstash-node.jpeg differ diff --git a/fr/.gitbook/assets/vectara/1.png b/fr/.gitbook/assets/vectara/1.png new file mode 100644 index 00000000..575a5236 Binary files /dev/null and b/fr/.gitbook/assets/vectara/1.png differ diff --git a/fr/.gitbook/assets/vectara/2.png b/fr/.gitbook/assets/vectara/2.png new file mode 100644 index 00000000..95c6a532 Binary files /dev/null and b/fr/.gitbook/assets/vectara/2.png differ diff --git a/fr/.gitbook/assets/vectara/3.png b/fr/.gitbook/assets/vectara/3.png new file mode 100644 index 00000000..d8236eb3 Binary files /dev/null and b/fr/.gitbook/assets/vectara/3.png differ diff --git a/fr/.gitbook/assets/vectara/4.png b/fr/.gitbook/assets/vectara/4.png new file mode 100644 index 00000000..4170964b Binary files /dev/null and b/fr/.gitbook/assets/vectara/4.png differ diff --git a/fr/.gitbook/assets/vectara/5.png b/fr/.gitbook/assets/vectara/5.png new file mode 100644 index 00000000..808bbdff Binary files /dev/null and b/fr/.gitbook/assets/vectara/5.png differ diff --git a/fr/.gitbook/assets/vectara/6.png b/fr/.gitbook/assets/vectara/6.png new file mode 100644 index 00000000..f0d16e32 Binary files /dev/null and b/fr/.gitbook/assets/vectara/6.png differ diff --git a/fr/.gitbook/assets/web-scrape-pinecone.png b/fr/.gitbook/assets/web-scrape-pinecone.png new file mode 100644 index 00000000..0d587dd9 Binary files /dev/null and b/fr/.gitbook/assets/web-scrape-pinecone.png differ diff --git a/fr/.gitbook/assets/white.png b/fr/.gitbook/assets/white.png new file mode 100644 index 00000000..7b7b9ea3 Binary files /dev/null and 
b/fr/.gitbook/assets/white.png differ diff --git a/fr/.gitbook/assets/writer.png b/fr/.gitbook/assets/writer.png new file mode 100644 index 00000000..5ccd0f83 Binary files /dev/null and b/fr/.gitbook/assets/writer.png differ diff --git a/fr/.gitbook/assets/zapier/nla/1.png b/fr/.gitbook/assets/zapier/nla/1.png new file mode 100644 index 00000000..33c5b663 Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/1.png differ diff --git a/fr/.gitbook/assets/zapier/nla/2.png b/fr/.gitbook/assets/zapier/nla/2.png new file mode 100644 index 00000000..b452d3bf Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/2.png differ diff --git a/fr/.gitbook/assets/zapier/nla/3.png b/fr/.gitbook/assets/zapier/nla/3.png new file mode 100644 index 00000000..0205f024 Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/3.png differ diff --git a/fr/.gitbook/assets/zapier/nla/4.png b/fr/.gitbook/assets/zapier/nla/4.png new file mode 100644 index 00000000..99312b68 Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/4.png differ diff --git a/fr/.gitbook/assets/zapier/nla/5.png b/fr/.gitbook/assets/zapier/nla/5.png new file mode 100644 index 00000000..06b23581 Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/5.png differ diff --git a/fr/.gitbook/assets/zapier/nla/6.png b/fr/.gitbook/assets/zapier/nla/6.png new file mode 100644 index 00000000..fbc834bb Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/6.png differ diff --git a/fr/.gitbook/assets/zapier/nla/7.png b/fr/.gitbook/assets/zapier/nla/7.png new file mode 100644 index 00000000..94609c96 Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/7.png differ diff --git a/fr/.gitbook/assets/zapier/nla/8.png b/fr/.gitbook/assets/zapier/nla/8.png new file mode 100644 index 00000000..93eb75af Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/8.png differ diff --git a/fr/.gitbook/assets/zapier/nla/9.png b/fr/.gitbook/assets/zapier/nla/9.png new file mode 100644 index 00000000..72a4859b 
Binary files /dev/null and b/fr/.gitbook/assets/zapier/nla/9.png differ diff --git a/fr/.gitbook/assets/zapier/zap/1.png b/fr/.gitbook/assets/zapier/zap/1.png new file mode 100644 index 00000000..cdd1e6a9 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/1.png differ diff --git a/fr/.gitbook/assets/zapier/zap/10.png b/fr/.gitbook/assets/zapier/zap/10.png new file mode 100644 index 00000000..770ce515 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/10.png differ diff --git a/fr/.gitbook/assets/zapier/zap/11.png b/fr/.gitbook/assets/zapier/zap/11.png new file mode 100644 index 00000000..0e1f5d3b Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/11.png differ diff --git a/fr/.gitbook/assets/zapier/zap/12.png b/fr/.gitbook/assets/zapier/zap/12.png new file mode 100644 index 00000000..667c65be Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/12.png differ diff --git a/fr/.gitbook/assets/zapier/zap/13.png b/fr/.gitbook/assets/zapier/zap/13.png new file mode 100644 index 00000000..8aee1edb Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/13.png differ diff --git a/fr/.gitbook/assets/zapier/zap/14.png b/fr/.gitbook/assets/zapier/zap/14.png new file mode 100644 index 00000000..4bef9d6d Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/14.png differ diff --git a/fr/.gitbook/assets/zapier/zap/15.png b/fr/.gitbook/assets/zapier/zap/15.png new file mode 100644 index 00000000..bd07b861 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/15.png differ diff --git a/fr/.gitbook/assets/zapier/zap/16.png b/fr/.gitbook/assets/zapier/zap/16.png new file mode 100644 index 00000000..87e1c401 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/16.png differ diff --git a/fr/.gitbook/assets/zapier/zap/17.png b/fr/.gitbook/assets/zapier/zap/17.png new file mode 100644 index 00000000..20fd2eff Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/17.png differ diff --git a/fr/.gitbook/assets/zapier/zap/18.png 
b/fr/.gitbook/assets/zapier/zap/18.png new file mode 100644 index 00000000..652ec4cd Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/18.png differ diff --git a/fr/.gitbook/assets/zapier/zap/19.png b/fr/.gitbook/assets/zapier/zap/19.png new file mode 100644 index 00000000..5d0e5ebf Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/19.png differ diff --git a/fr/.gitbook/assets/zapier/zap/2.png b/fr/.gitbook/assets/zapier/zap/2.png new file mode 100644 index 00000000..69b43621 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/2.png differ diff --git a/fr/.gitbook/assets/zapier/zap/20.png b/fr/.gitbook/assets/zapier/zap/20.png new file mode 100644 index 00000000..47667132 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/20.png differ diff --git a/fr/.gitbook/assets/zapier/zap/21.png b/fr/.gitbook/assets/zapier/zap/21.png new file mode 100644 index 00000000..b0a1c0d9 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/21.png differ diff --git a/fr/.gitbook/assets/zapier/zap/22.png b/fr/.gitbook/assets/zapier/zap/22.png new file mode 100644 index 00000000..bd8e8e34 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/22.png differ diff --git a/fr/.gitbook/assets/zapier/zap/23.png b/fr/.gitbook/assets/zapier/zap/23.png new file mode 100644 index 00000000..a3467930 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/23.png differ diff --git a/fr/.gitbook/assets/zapier/zap/24.png b/fr/.gitbook/assets/zapier/zap/24.png new file mode 100644 index 00000000..c356925c Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/24.png differ diff --git a/fr/.gitbook/assets/zapier/zap/25.png b/fr/.gitbook/assets/zapier/zap/25.png new file mode 100644 index 00000000..76527a91 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/25.png differ diff --git a/fr/.gitbook/assets/zapier/zap/26.png b/fr/.gitbook/assets/zapier/zap/26.png new file mode 100644 index 00000000..74a4c55b Binary files /dev/null and 
b/fr/.gitbook/assets/zapier/zap/26.png differ diff --git a/fr/.gitbook/assets/zapier/zap/3.png b/fr/.gitbook/assets/zapier/zap/3.png new file mode 100644 index 00000000..46eb81b8 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/3.png differ diff --git a/fr/.gitbook/assets/zapier/zap/4.png b/fr/.gitbook/assets/zapier/zap/4.png new file mode 100644 index 00000000..6ba39b85 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/4.png differ diff --git a/fr/.gitbook/assets/zapier/zap/5.png b/fr/.gitbook/assets/zapier/zap/5.png new file mode 100644 index 00000000..804367de Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/5.png differ diff --git a/fr/.gitbook/assets/zapier/zap/6.png b/fr/.gitbook/assets/zapier/zap/6.png new file mode 100644 index 00000000..d42ddeac Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/6.png differ diff --git a/fr/.gitbook/assets/zapier/zap/7.png b/fr/.gitbook/assets/zapier/zap/7.png new file mode 100644 index 00000000..ce2ca2e8 Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/7.png differ diff --git a/fr/.gitbook/assets/zapier/zap/8.png b/fr/.gitbook/assets/zapier/zap/8.png new file mode 100644 index 00000000..851e4f5f Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/8.png differ diff --git a/fr/.gitbook/assets/zapier/zap/9.png b/fr/.gitbook/assets/zapier/zap/9.png new file mode 100644 index 00000000..fcaa125a Binary files /dev/null and b/fr/.gitbook/assets/zapier/zap/9.png differ diff --git a/fr/.gitbook/assets/zeabur/1.png b/fr/.gitbook/assets/zeabur/1.png new file mode 100644 index 00000000..2b4186e5 Binary files /dev/null and b/fr/.gitbook/assets/zeabur/1.png differ diff --git a/fr/.gitbook/assets/zeabur/2.png b/fr/.gitbook/assets/zeabur/2.png new file mode 100644 index 00000000..8d8063fb Binary files /dev/null and b/fr/.gitbook/assets/zeabur/2.png differ diff --git a/fr/.gitbook/assets/zeabur/3.png b/fr/.gitbook/assets/zeabur/3.png new file mode 100644 index 00000000..af9e1687 Binary 
files /dev/null and b/fr/.gitbook/assets/zeabur/3.png differ diff --git a/fr/.gitbook/assets/zeabur/4.png b/fr/.gitbook/assets/zeabur/4.png new file mode 100644 index 00000000..ab41bc8a Binary files /dev/null and b/fr/.gitbook/assets/zeabur/4.png differ diff --git a/fr/README.md b/fr/README.md new file mode 100644 index 00000000..460113fa --- /dev/null +++ b/fr/README.md @@ -0,0 +1,71 @@ +--- +description: Bienvenue dans la documentation officielle de Flowise +--- + +# Introduction + +
+ +Flowise est une plateforme de développement d'IA générative open source pour la création d'agents IA et de workflows LLM. + +Elle offre une solution complète qui inclut : + +* [x] Constructeur Visuel +* [x] Suivi & Analytique +* [x] Évaluations +* [x] Humain dans la Boucle +* [x] API, CLI, SDK, Chatbot Intégré +* [x] Équipes & Espaces de Travail + +Il existe 3 principaux constructeurs visuels, à savoir : + +* Assistant +* Chatflow +* Agentflow + +## Assistant + +L'Assistant est la manière la plus conviviale de créer un agent IA. Les utilisateurs peuvent créer un assistant de chat capable de suivre des instructions, d'utiliser des outils si nécessaire, et de récupérer des connaissances à partir de fichiers téléchargés ([RAG](https://en.wikipedia.org/wiki/Retrieval-augmented_generation)) pour répondre aux requêtes des utilisateurs. + +
+ +## Chatflow + +Chatflow est conçu pour construire des systèmes à agent unique, des chatbots et des flux LLM simples. Il est plus flexible que l'Assistant. Les utilisateurs peuvent utiliser des techniques avancées telles que Graph RAG, Reranker, Retriever, etc. + +
+ +## Agentflow + +Agentflow est le sur-ensemble de Chatflow et de l'Assistant. Il peut être utilisé pour créer un assistant de chat, un système à agent unique, des systèmes multi-agents et une orchestration de workflows complexes. En savoir plus sur [Agentflow V2](using-flowise/agentflowv2.md). + +<figure><img src=".gitbook/assets/agentflowv2-generated.png" alt=""><figcaption></figcaption></figure>
+ +## Capacités de Flowise + +| Domaine de Fonctionnalité | Capacités de Flowise | +| ---------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| Orchestration | Éditeur visuel, prend en charge les modèles open-source et propriétaires, expressions, code personnalisé, logique de branchement/boucle/routage | +| Ingestion et Intégration des Données | Se connecte à plus de 100 sources, outils, bases de données vectorielles, mémoires | +| Surveillance | Journaux d'exécution, débogage visuel, diffusion de journaux externes | +| Déploiement | Options auto-hébergées, déploiement en environnement isolé | +| Traitement des Données | Transformations de données, filtres, agrégats, code personnalisé, pipelines d'indexation RAG | +| Mémoire et Planification | Techniques d'optimisation de mémoire variées et intégrations | +| Intégration MCP | Nœuds client/serveur MCP, liste d'outils, SSE, support d'authentification | +| Sécurité et Contrôle | Modération des entrées et post-traitement des sorties | +| API, SDK, CLI | Accès API, SDK JS/Python, Interface en Ligne de Commande | +| Chatbot Intégré et Partagé | Widget de chat intégré personnalisable et composant | +| Modèles et Composants | Marché de modèles, composants réutilisables | +| Contrôles de Sécurité | RBAC, SSO, identifiants chiffrés, gestionnaires de secrets, limitation de taux, domaines restreints | +| Scalabilité | Échelle verticale/horizontale, charge de travail/flux élevée | +| Évaluations | Ensembles de données, Évaluateurs et Évaluations | +| Support Communautaire | Forum communautaire actif | +| Support Fournisseur | Support SLA, consultations, tarification fixe/déterministe | + +## Contribuer + +Si vous souhaitez aider ce projet, veuillez envisager de consulter le [Guide de Contribution](contributing/README.md). + +## Besoin d'aide ? 
+ +Pour un support et des discussions supplémentaires, rendez-vous sur notre serveur [Discord](https://discord.gg/jbaHfsRVBW). \ No newline at end of file diff --git a/fr/SUMMARY.md b/fr/SUMMARY.md new file mode 100644 index 00000000..36a17f7d --- /dev/null +++ b/fr/SUMMARY.md @@ -0,0 +1,342 @@ +# Table des matières + +* [Introduction](README.md) +* [Get Started](getting-started/README.md) +* [Contribution Guide](contributing/README.md) + * [Building Node](contributing/building-node.md) +* [API Reference](api-reference/README.md) + * [Assistants](api-reference/assistants.md) + * [Attachments](api-reference/attachments.md) + * [Chat Message](api-reference/chat-message.md) + * [Chatflows](api-reference/chatflows.md) + * [Document Store](api-reference/document-store.md) + * [Feedback](api-reference/feedback.md) + * [Leads](api-reference/leads.md) + * [Ping](api-reference/ping.md) + * [Prediction](api-reference/prediction.md) + * [Tools](api-reference/tools.md) + * [Upsert History](api-reference/upsert-history.md) + * [Variables](api-reference/variables.md) + * [Vector Upsert](api-reference/vector-upsert.md) +* [CLI Reference](cli-reference/README.md) + * [User](cli-reference/user.md) +* [Using Flowise](using-flowise/README.md) + * [Agentflow V2](using-flowise/agentflowv2.md) + * [Agentflow V1 (Deprecating)](using-flowise/agentflowv1/README.md) + * [Multi-Agents](using-flowise/agentflowv1/multi-agents.md) + * [Sequential Agents](using-flowise/agentflowv1/sequential-agents/README.md) + * [Video Tutorials](using-flowise/agentflowv1/sequential-agents/video-tutorials.md) + * [Prediction](using-flowise/prediction.md) + * [Streaming](using-flowise/streaming.md) + * [Document Stores](using-flowise/document-stores.md) + * [Upsertion](using-flowise/upsertion.md) + * [Analytic](using-flowise/analytics/README.md) + * [Arize](using-flowise/analytics/arize.md) + * [LangWatch](using-flowise/analytics/langwatch.md) + * [Langfuse](using-flowise/analytics/langfuse.md) + * 
[Lunary](using-flowise/analytics/lunary.md) + * [Opik](using-flowise/analytics/opik.md) + * [Phoenix](using-flowise/analytics/phoenix.md) + * [Monitoring](using-flowise/monitoring.md) + * [Embed](using-flowise/embed.md) + * [Uploads](using-flowise/uploads.md) + * [Variables](using-flowise/variables.md) + * [Workspaces](using-flowise/workspaces.md) + * [Evaluations](using-flowise/evaluations.md) +* [Configuration](configuration/README.md) + * [Auth](configuration/authorization/README.md) + * [Application](configuration/authorization/app-level.md) + * [Flows](configuration/authorization/chatflow-level.md) + * [Databases](configuration/databases.md) + * [Deployment](configuration/deployment/README.md) + * [AWS](configuration/deployment/aws.md) + * [Azure](configuration/deployment/azure.md) + * [Alibaba Cloud](https://aliyun-computenest.github.io/quickstart-flowise/) + * [Digital Ocean](configuration/deployment/digital-ocean.md) + * [Elestio](https://elest.io/open-source/flowiseai) + * [GCP](configuration/deployment/gcp.md) + * [Hugging Face](configuration/deployment/hugging-face.md) + * [Kubernetes using Helm](https://artifacthub.io/packages/helm/cowboysysop/flowise) + * [Railway](configuration/deployment/railway.md) + * [Render](configuration/deployment/render.md) + * [Replit](configuration/deployment/replit.md) + * [RepoCloud](https://repocloud.io/details/?app_id=29) + * [Sealos](configuration/deployment/sealos.md) + * [Zeabur](configuration/deployment/zeabur.md) + * [Environment Variables](configuration/environment-variables.md) + * [Rate Limit](configuration/rate-limit.md) + * [Running Flowise behind company proxy](configuration/running-flowise-behind-company-proxy.md) + * [SSO](configuration/sso.md) + * [Running Flowise using Queue](configuration/running-flowise-using-queue.md) + * [Running in Production](configuration/running-in-production.md) +* [Integrations](integrations/README.md) + * [LangChain](integrations/langchain/README.md) + * 
[Agents](integrations/langchain/agents/README.md) + * [Airtable Agent](integrations/langchain/agents/airtable-agent.md) + * [AutoGPT](integrations/langchain/agents/autogpt.md) + * [BabyAGI](integrations/langchain/agents/babyagi.md) + * [CSV Agent](integrations/langchain/agents/csv-agent.md) + * [Conversational Agent](integrations/langchain/agents/conversational-agent.md) + * [Conversational Retrieval Agent](integrations/langchain/agents/conversational-retrieval-agent.md) + * [MistralAI Tool Agent](integrations/langchain/agents/mistralai-tool-agent.md) + * [OpenAI Assistant](integrations/langchain/agents/openai-assistant/README.md) + * [Threads](integrations/langchain/agents/openai-assistant/threads.md) + * [OpenAI Function Agent](integrations/langchain/agents/openai-function-agent.md) + * [OpenAI Tool Agent](integrations/langchain/agents/openai-tool-agent.md) + * [ReAct Agent Chat](integrations/langchain/agents/react-agent-chat.md) + * [ReAct Agent LLM](integrations/langchain/agents/react-agent-llm.md) + * [Tool Agent](integrations/langchain/agents/tool-agent.md) + * [XML Agent](integrations/langchain/agents/xml-agent.md) + * [Cache](integrations/langchain/cache/README.md) + * [InMemory Cache](integrations/langchain/cache/in-memory-cache.md) + * [InMemory Embedding Cache](integrations/langchain/cache/inmemory-embedding-cache.md) + * [Momento Cache](integrations/langchain/cache/momento-cache.md) + * [Redis Cache](integrations/langchain/cache/redis-cache.md) + * [Redis Embeddings Cache](integrations/langchain/cache/redis-embeddings-cache.md) + * [Upstash Redis Cache](integrations/langchain/cache/upstash-redis-cache.md) + * [Chains](integrations/langchain/chains/README.md) + * [GET API Chain](integrations/langchain/chains/get-api-chain.md) + * [OpenAPI Chain](integrations/langchain/chains/openapi-chain.md) + * [POST API Chain](integrations/langchain/chains/post-api-chain.md) + * [Conversation Chain](integrations/langchain/chains/conversation-chain.md) + * 
[Conversational Retrieval QA Chain](integrations/langchain/chains/conversational-retrieval-qa-chain.md) + * [LLM Chain](integrations/langchain/chains/llm-chain.md) + * [Multi Prompt Chain](integrations/langchain/chains/multi-prompt-chain.md) + * [Multi Retrieval QA Chain](integrations/langchain/chains/multi-retrieval-qa-chain.md) + * [Retrieval QA Chain](integrations/langchain/chains/retrieval-qa-chain.md) + * [Sql Database Chain](integrations/langchain/chains/sql-database-chain.md) + * [Vectara QA Chain](integrations/langchain/chains/vectara-chain.md) + * [VectorDB QA Chain](integrations/langchain/chains/vectordb-qa-chain.md) + * [Chat Models](integrations/langchain/chat-models/README.md) + * [AWS ChatBedrock](integrations/langchain/chat-models/aws-chatbedrock.md) + * [Azure ChatOpenAI](integrations/langchain/chat-models/azure-chatopenai-1.md) + * [NVIDIA NIM](integrations/langchain/chat-models/nvidia-nim.md) + * [ChatCometAPI](integrations/langchain/chat-models/chatcometapi.md) + * [ChatAnthropic](integrations/langchain/chat-models/chatanthropic.md) + * [ChatCohere](integrations/langchain/chat-models/chatcohere.md) + * [Chat Fireworks](integrations/langchain/chat-models/chat-fireworks.md) + * [ChatGoogleGenerativeAI](integrations/langchain/chat-models/google-ai.md) + * [Google VertexAI](integrations/langchain/chat-models/google-vertexai.md) + * [ChatHuggingFace](integrations/langchain/chat-models/chathuggingface.md) + * [ChatLocalAI](integrations/langchain/chat-models/chatlocalai.md) + * [ChatMistralAI](integrations/langchain/chat-models/mistral-ai.md) + * [IBM Watsonx](integrations/langchain/chat-models/ibm-watsonx.md) + * [ChatOllama](integrations/langchain/chat-models/chatollama.md) + * [ChatOpenAI](integrations/langchain/chat-models/azure-chatopenai.md) + * [ChatTogetherAI](integrations/langchain/chat-models/chattogetherai.md) + * [GroqChat](integrations/langchain/chat-models/groqchat.md) + * [Document 
Loaders](integrations/langchain/document-loaders/README.md) + * [Airtable](integrations/langchain/document-loaders/airtable.md) + * [API Loader](integrations/langchain/document-loaders/api-loader.md) + * [Apify Website Content Crawler](integrations/langchain/document-loaders/apify-website-content-crawler.md) + * [BraveSearch Loader](integrations/langchain/document-loaders/bravesearch-api.md) + * [Cheerio Web Scraper](integrations/langchain/document-loaders/cheerio-web-scraper.md) + * [Confluence](integrations/langchain/document-loaders/confluence.md) + * [Csv File](integrations/langchain/document-loaders/csv-file.md) + * [Custom Document Loader](integrations/langchain/document-loaders/custom-document-loader.md) + * [Document Store](integrations/langchain/document-loaders/document-store.md) + * [Docx File](integrations/langchain/document-loaders/docx-file.md) + * [Epub File](integrations/langchain/document-loaders/epub-file.md) + * [Figma](integrations/langchain/document-loaders/figma.md) + * [File](integrations/langchain/document-loaders/file-loader.md) + * [FireCrawl](integrations/langchain/document-loaders/firecrawl.md) + * [Folder](integrations/langchain/document-loaders/folder.md) + * [GitBook](integrations/langchain/document-loaders/gitbook.md) + * [Github](integrations/langchain/document-loaders/github.md) + * [Google Drive](integrations/langchain/document-loaders/google-drive.md) + * [Google Sheets](integrations/langchain/document-loaders/google-sheets.md) + * [Jira](integrations/langchain/document-loaders/jira.md) + * [Json File](integrations/langchain/document-loaders/json-file.md) + * [Json Lines File](integrations/langchain/document-loaders/jsonlines.md) + * [Microsoft Excel](integrations/langchain/document-loaders/microsoft-excel.md) + * [Microsoft Powerpoint](integrations/langchain/document-loaders/microsoft-powerpoint.md) + * [Microsoft Word](integrations/langchain/document-loaders/microsoft-word.md) + * 
[Notion](integrations/langchain/document-loaders/notion.md) + * [Oxylabs](integrations/langchain/document-loaders/oxylabs.md) + * [PDF Files](integrations/langchain/document-loaders/pdf-file.md) + * [Plain Text](integrations/langchain/document-loaders/plain-text.md) + * [Playwright Web Scraper](integrations/langchain/document-loaders/playwright-web-scraper.md) + * [Puppeteer Web Scraper](integrations/langchain/document-loaders/puppeteer-web-scraper.md) + * [S3 File Loader](integrations/langchain/document-loaders/s3-file-loader.md) + * [SearchApi For Web Search](integrations/langchain/document-loaders/searchapi-for-web-search.md) + * [SerpApi For Web Search](integrations/langchain/document-loaders/serpapi-for-web-search.md) + * [Spider - web search & crawler](integrations/langchain/document-loaders/spider-web-scraper-crawler.md) + * [Text File](integrations/langchain/document-loaders/text-file.md) + * [Unstructured File Loader](integrations/langchain/document-loaders/unstructured-file-loader.md) + * [Unstructured Folder Loader](integrations/langchain/document-loaders/unstructured-folder-loader.md) + * [Embeddings](integrations/langchain/embeddings/README.md) + * [AWS Bedrock Embeddings](integrations/langchain/embeddings/aws-bedrock-embeddings.md) + * [Azure OpenAI Embeddings](integrations/langchain/embeddings/azure-openai-embeddings.md) + * [Cohere Embeddings](integrations/langchain/embeddings/cohere-embeddings.md) + * [Google GenerativeAI Embeddings](integrations/langchain/embeddings/googlegenerativeai-embeddings.md) + * [Google VertexAI Embeddings](integrations/langchain/embeddings/googlevertexai-embeddings.md) + * [HuggingFace Inference Embeddings](integrations/langchain/embeddings/huggingface-inference-embeddings.md) + * [LocalAI Embeddings](integrations/langchain/embeddings/localai-embeddings.md) + * [MistralAI Embeddings](integrations/langchain/embeddings/mistralai-embeddings.md) + * [Ollama Embeddings](integrations/langchain/embeddings/ollama-embeddings.md) + 
* [OpenAI Embeddings](integrations/langchain/embeddings/openai-embeddings.md) + * [OpenAI Embeddings Custom](integrations/langchain/embeddings/openai-embeddings-custom.md) + * [TogetherAI Embedding](integrations/langchain/embeddings/togetherai-embedding.md) + * [VoyageAI Embeddings](integrations/langchain/embeddings/voyageai-embeddings.md) + * [LLMs](integrations/langchain/llms/README.md) + * [AWS Bedrock](integrations/langchain/llms/aws-bedrock.md) + * [Azure OpenAI](integrations/langchain/llms/azure-openai.md) + * [Cohere](integrations/langchain/llms/cohere.md) + * [GoogleVertex AI](integrations/langchain/llms/googlevertex-ai.md) + * [HuggingFace Inference](integrations/langchain/llms/huggingface-inference.md) + * [Ollama](integrations/langchain/llms/ollama.md) + * [OpenAI](integrations/langchain/llms/openai.md) + * [Replicate](integrations/langchain/llms/replicate.md) + * [Memory](integrations/langchain/memory/README.md) + * [Buffer Memory](integrations/langchain/memory/buffer-memory.md) + * [Buffer Window Memory](integrations/langchain/memory/buffer-window-memory.md) + * [Conversation Summary Memory](integrations/langchain/memory/conversation-summary-memory.md) + * [Conversation Summary Buffer Memory](integrations/langchain/memory/conversation-summary-buffer-memory.md) + * [DynamoDB Chat Memory](integrations/langchain/memory/dynamodb-chat-memory.md) + * [MongoDB Atlas Chat Memory](integrations/langchain/memory/mongodb-atlas-chat-memory.md) + * [Redis-Backed Chat Memory](integrations/langchain/memory/redis-backed-chat-memory.md) + * [Upstash Redis-Backed Chat Memory](integrations/langchain/memory/upstash-redis-backed-chat-memory.md) + * [Zep Memory](integrations/langchain/memory/zep-memory.md) + * [Moderation](integrations/langchain/moderation/README.md) + * [OpenAI Moderation](integrations/langchain/moderation/openai-moderation.md) + * [Simple Prompt Moderation](integrations/langchain/moderation/simple-prompt-moderation.md) + * [Output 
Parsers](integrations/langchain/output-parsers/README.md) + * [CSV Output Parser](integrations/langchain/output-parsers/csv-output-parser.md) + * [Custom List Output Parser](integrations/langchain/output-parsers/custom-list-output-parser.md) + * [Structured Output Parser](integrations/langchain/output-parsers/structured-output-parser.md) + * [Advanced Structured Output Parser](integrations/langchain/output-parsers/advanced-structured-output-parser.md) + * [Prompts](integrations/langchain/prompts/README.md) + * [Chat Prompt Template](integrations/langchain/prompts/chat-prompt-template.md) + * [Few Shot Prompt Template](integrations/langchain/prompts/few-shot-prompt-template.md) + * [Prompt Template](integrations/langchain/prompts/prompt-template.md) + * [Record Managers](integrations/langchain/record-managers.md) + * [Retrievers](integrations/langchain/retrievers/README.md) + * [Extract Metadata Retriever](integrations/langchain/retrievers/extract-metadata-retriever.md) + * [Custom Retriever](integrations/langchain/retrievers/custom-retriever.md) + * [Cohere Rerank Retriever](integrations/langchain/retrievers/cohere-rerank-retriever.md) + * [Embeddings Filter Retriever](integrations/langchain/retrievers/embeddings-filter-retriever.md) + * [HyDE Retriever](integrations/langchain/retrievers/hyde-retriever.md) + * [LLM Filter Retriever](integrations/langchain/retrievers/llm-filter-retriever.md) + * [Multi Query Retriever](integrations/langchain/retrievers/multi-query-retriever.md) + * [Prompt Retriever](integrations/langchain/retrievers/prompt-retriever.md) + * [Reciprocal Rank Fusion Retriever](integrations/langchain/retrievers/reciprocal-rank-fusion-retriever.md) + * [Similarity Score Threshold Retriever](integrations/langchain/retrievers/similarity-score-threshold-retriever.md) + * [Vector Store Retriever](integrations/langchain/retrievers/vector-store-retriever.md) + * [Voyage AI Rerank Retriever](integrations/langchain/retrievers/page.md) + * [Text 
Splitters](integrations/langchain/text-splitters/README.md) + * [Character Text Splitter](integrations/langchain/text-splitters/character-text-splitter.md) + * [Code Text Splitter](integrations/langchain/text-splitters/code-text-splitter.md) + * [Html-To-Markdown Text Splitter](integrations/langchain/text-splitters/html-to-markdown-text-splitter.md) + * [Markdown Text Splitter](integrations/langchain/text-splitters/markdown-text-splitter.md) + * [Recursive Character Text Splitter](integrations/langchain/text-splitters/recursive-character-text-splitter.md) + * [Token Text Splitter](integrations/langchain/text-splitters/token-text-splitter.md) + * [Tools](integrations/langchain/tools/README.md) + * [BraveSearch API](integrations/langchain/tools/bravesearch-api.md) + * [Calculator](integrations/langchain/tools/calculator.md) + * [Chain Tool](integrations/langchain/tools/chain-tool.md) + * [Chatflow Tool](integrations/langchain/tools/chatflow-tool.md) + * [Custom Tool](integrations/langchain/tools/custom-tool.md) + * [Exa Search](integrations/langchain/tools/exa-search.md) + * [Gmail](integrations/langchain/tools/gmail.md) + * [Google Calendar](integrations/langchain/tools/google-calendar.md) + * [Google Custom Search](integrations/langchain/tools/google-custom-search.md) + * [Google Drive](integrations/langchain/tools/google-drive.md) + * [Google Sheets](integrations/langchain/tools/google-sheets.md) + * [Microsoft Outlook](integrations/langchain/tools/microsoft-outlook.md) + * [Microsoft Teams](integrations/langchain/tools/microsoft-teams.md) + * [OpenAPI Toolkit](integrations/langchain/tools/openapi-toolkit.md) + * [Code Interpreter by E2B](integrations/langchain/tools/python-interpreter.md) + * [Read File](integrations/langchain/tools/read-file.md) + * [Request Get](integrations/langchain/tools/request-get.md) + * [Request Post](integrations/langchain/tools/request-post.md) + * [Retriever Tool](integrations/langchain/tools/retriever-tool.md) + * 
[SearchApi](integrations/langchain/tools/searchapi.md) + * [SearXNG](integrations/langchain/tools/searxng.md) + * [Serp API](integrations/langchain/tools/serp-api.md) + * [Serper](integrations/langchain/tools/serper.md) + * [Tavily](integrations/langchain/tools/tavily-ai.md) + * [Web Browser](integrations/langchain/tools/web-browser.md) + * [Write File](integrations/langchain/tools/write-file.md) + * [Vector Stores](integrations/langchain/vector-stores/README.md) + * [AstraDB](integrations/langchain/vector-stores/astradb.md) + * [Chroma](integrations/langchain/vector-stores/chroma.md) + * [Couchbase](integrations/langchain/vector-stores/couchbase.md) + * [Elastic](integrations/langchain/vector-stores/elastic.md) + * [Faiss](integrations/langchain/vector-stores/faiss.md) + * [In-Memory Vector Store](integrations/langchain/vector-stores/in-memory-vector-store.md) + * [Milvus](integrations/langchain/vector-stores/milvus.md) + * [MongoDB Atlas](integrations/langchain/vector-stores/mongodb-atlas.md) + * [OpenSearch](integrations/langchain/vector-stores/opensearch.md) + * [Pinecone](integrations/langchain/vector-stores/pinecone.md) + * [Postgres](integrations/langchain/vector-stores/postgres.md) + * [Qdrant](integrations/langchain/vector-stores/qdrant.md) + * [Redis](integrations/langchain/vector-stores/redis.md) + * [SingleStore](integrations/langchain/vector-stores/singlestore.md) + * [Supabase](integrations/langchain/vector-stores/supabase.md) + * [Upstash Vector](integrations/langchain/vector-stores/upstash-vector.md) + * [Vectara](integrations/langchain/vector-stores/vectara.md) + * [Weaviate](integrations/langchain/vector-stores/weaviate.md) + * [Zep Collection - Open Source](integrations/langchain/vector-stores/zep-collection-open-source.md) + * [Zep Collection - Cloud](integrations/langchain/vector-stores/zep-collection-cloud.md) + * [LiteLLM Proxy](integrations/litellm/README.md) + * [LlamaIndex](integrations/llamaindex/README.md) + * 
[Agents](integrations/llamaindex/agents/README.md) + * [OpenAI Tool Agent](integrations/llamaindex/agents/openai-tool-agent.md) + * [Anthropic Tool Agent](integrations/llamaindex/agents/openai-tool-agent-1.md) + * [Chat Models](integrations/llamaindex/chat-models/README.md) + * [AzureChatOpenAI](integrations/llamaindex/chat-models/azurechatopenai.md) + * [ChatAnthropic](integrations/llamaindex/chat-models/chatanthropic.md) + * [ChatMistral](integrations/llamaindex/chat-models/chatmistral.md) + * [ChatOllama](integrations/llamaindex/chat-models/chatollama.md) + * [ChatOpenAI](integrations/llamaindex/chat-models/chatopenai.md) + * [ChatTogetherAI](integrations/llamaindex/chat-models/chattogetherai.md) + * [ChatGroq](integrations/llamaindex/chat-models/chatgroq.md) + * [Embeddings](integrations/llamaindex/embeddings/README.md) + * [Azure OpenAI Embeddings](integrations/llamaindex/embeddings/azure-openai-embeddings.md) + * [OpenAI Embedding](integrations/llamaindex/embeddings/openai-embedding.md) + * [Engine](integrations/llamaindex/engine/README.md) + * [Query Engine](integrations/llamaindex/engine/query-engine.md) + * [Simple Chat Engine](integrations/llamaindex/engine/simple-chat-engine.md) + * [Context Chat Engine](integrations/llamaindex/engine/context-chat-engine.md) + * [Sub-Question Query Engine](integrations/llamaindex/engine/sub-question-query-engine.md) + * [Response Synthesizer](integrations/llamaindex/response-synthesizer/README.md) + * [Refine](integrations/llamaindex/response-synthesizer/refine.md) + * [Compact And Refine](integrations/llamaindex/response-synthesizer/compact-and-refine.md) + * [Simple Response Builder](integrations/llamaindex/response-synthesizer/simple-response-builder.md) + * [Tree Summarize](integrations/llamaindex/response-synthesizer/tree-summarize.md) + * [Tools](integrations/llamaindex/tools/README.md) + * [Query Engine Tool](integrations/llamaindex/tools/query-engine-tool.md) + * [Vector 
Stores](integrations/llamaindex/vector-stores/README.md) + * [Pinecone](integrations/llamaindex/vector-stores/pinecone.md) + * [SimpleStore](integrations/llamaindex/vector-stores/queryengine-tool.md) + * [Utilities](integrations/utilities/README.md) + * [Custom JS Function](integrations/utilities/custom-js-function.md) + * [Set/Get Variable](integrations/utilities/set-get-variable.md) + * [If Else](integrations/utilities/if-else.md) + * [Sticky Note](integrations/utilities/sticky-note.md) + * [External Integrations](integrations/3rd-party-platform-integration/README.md) + * [Zapier Zaps](integrations/3rd-party-platform-integration/zapier-zaps.md) + * [Open WebUI](integrations/3rd-party-platform-integration/open-webui.md) + * [Streamlit](integrations/3rd-party-platform-integration/streamlit.md) +* [Migration Guide](migration-guide/README.md) + * [Cloud Migration](migration-guide/cloud-migration.md) + * [v1.3.0 Migration Guide](migration-guide/v1.3.0-migration-guide.md) + * [v1.4.3 Migration Guide](migration-guide/v1.4.3-migration-guide.md) + * [v2.1.4 Migration Guide](migration-guide/v2.1.4-migration-guide.md) +* [Tutorials](tutorials/README.md) + * [RAG](tutorials/rag.md) + * [Agentic RAG](tutorials/agentic-rag.md) + * [SQL Agent](tutorials/sql-agent.md) + * [Agent as Tool](tutorials/agent-as-tool.md) + * [Interacting with API](tutorials/interacting-with-api.md) + * [Tools & MCP](tutorials/tools-and-mcp.md) + * [Structured Output](tutorials/structured-output.md) + * [Human In The Loop](tutorials/human-in-the-loop.md) + * [Deep Research](tutorials/deep-research.md) + * [Customer Support](tutorials/customer-support.md) + * [Supervisor and Workers](tutorials/supervisor-and-workers.md) + +## Flowise + +* [Flowise GitHub](https://github.com/FlowiseAI) +* [Flowise Cloud](https://flowiseai.com/join) diff --git a/fr/api-reference/README.md b/fr/api-reference/README.md new file mode 100644 index 00000000..e14d5efc --- /dev/null +++ b/fr/api-reference/README.md @@ -0,0 +1,17 
@@ +# Référence API + +En utilisant l'API publique de Flowise, vous pouvez exécuter de manière programmatique de nombreuses tâches similaires à celles que vous pouvez réaliser dans l'interface graphique. Cette section présente l'API REST de Flowise. + +* [Assistants](assistants.md) +* [Pièces jointes](attachments.md) +* [Message de chat](chat-message.md) +* [Flux de chat](chatflows.md) +* [Stockage de documents](document-store.md) +* [Retour d'information](feedback.md) +* [Prospects](leads.md) +* [Ping](ping.md) +* [Prédiction](prediction.md) +* [Outils](tools.md) +* [Historique d'Upsert](upsert-history.md) +* [Variables](variables.md) +* [Upsert de vecteur](vector-upsert.md) diff --git a/fr/api-reference/assistants.md b/fr/api-reference/assistants.md new file mode 100644 index 00000000..37fb8bcc --- /dev/null +++ b/fr/api-reference/assistants.md @@ -0,0 +1,17 @@ +# Assistants + +{% openapi-operation spec="flowiseai-api" path="/assistants" method="post" %} +[Broken link](broken-reference) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/assistants/{id}" method="get" %} +[Broken link](broken-reference) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/assistants/{id}" method="put" %} +[Broken link](broken-reference) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/assistants/{id}" method="delete" %} +[Broken link](broken-reference) +{% endopenapi-operation %} diff --git a/fr/api-reference/attachments.md b/fr/api-reference/attachments.md new file mode 100644 index 00000000..7d821c96 --- /dev/null +++ b/fr/api-reference/attachments.md @@ -0,0 +1,5 @@ +# Attachments + +{% openapi-operation spec="flowiseai-api" path="/attachments/{chatflowId}/{chatId}" method="post" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250622%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250622T114123Z&X-Amz-Expires=172800&X-Amz-Signature=5dd4b3343a1c2a841e205191e3862f26b17fd1af871cb148ed64f6c7ad8048f1&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/chat-message.md b/fr/api-reference/chat-message.md new file mode 100644 index 00000000..b2e51b45 --- /dev/null +++ b/fr/api-reference/chat-message.md @@ -0,0 +1,9 @@ +# Chat Message + +{% openapi-operation spec="flowiseai-api" path="/chatmessage/{id}" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/chatmessage/{id}" method="delete" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/chatflows.md b/fr/api-reference/chatflows.md new file mode 100644 index 00000000..41ee4f75 --- /dev/null +++ b/fr/api-reference/chatflows.md @@ -0,0 +1,25 @@ +# Chatflows + +{% openapi-operation spec="flowiseai-api" path="/chatflows" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/chatflows/{id}" method="get" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/chatflows/apikey/{apikey}" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/chatflows/{id}" method="put" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/chatflows/{id}" method="delete" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/chatflows" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/document-store.md b/fr/api-reference/document-store.md new file mode 100644 index 00000000..5a0b6a14 --- /dev/null +++ b/fr/api-reference/document-store.md @@ -0,0 +1,53 @@ +# Document Store + +{% openapi-operation spec="flowiseai-api" path="/document-store/store/{id}" method="get" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/chunks/{storeId}/{loaderId}/{pageNo}" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/store" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/upsert/{id}" 
method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/refresh/{id}" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/vectorstore/query" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" 
path="/document-store/store" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/chunks/{storeId}/{loaderId}/{chunkId}" method="put" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/store/{id}" method="put" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% 
openapi-operation spec="flowiseai-api" path="/document-store/store/{id}" method="delete" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/chunks/{storeId}/{loaderId}/{chunkId}" method="delete" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/loader/{storeId}/{loaderId}" method="delete" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/document-store/vectorstore/{id}" method="delete" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/feedback.md b/fr/api-reference/feedback.md new file mode 100644 index 00000000..dda7c9f3 --- /dev/null +++ b/fr/api-reference/feedback.md @@ -0,0 +1,13 @@ +# Feedback + +{% openapi-operation spec="flowiseai-api" path="/feedback/{id}" method="get" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/feedback" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/feedback/{id}" method="put" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/leads.md b/fr/api-reference/leads.md new file mode 100644 index 00000000..ff665259 --- /dev/null 
+++ b/fr/api-reference/leads.md @@ -0,0 +1,9 @@ +# Leads + +{% openapi-operation spec="flowiseai-api" path="/leads/{id}" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/leads" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/ping.md b/fr/api-reference/ping.md new file mode 100644 index 00000000..f775a169 --- /dev/null +++ b/fr/api-reference/ping.md @@ -0,0 +1,5 @@ +# Ping + +{% openapi-operation spec="flowiseai-api" path="/ping" method="get" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250622%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250622T114123Z&X-Amz-Expires=172800&X-Amz-Signature=5dd4b3343a1c2a841e205191e3862f26b17fd1af871cb148ed64f6c7ad8048f1&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/prediction.md b/fr/api-reference/prediction.md new file mode 100644 index 00000000..025fa0c9 --- /dev/null +++ b/fr/api-reference/prediction.md @@ -0,0 +1,5 @@ +# Prediction + +{% openapi-operation spec="flowiseai-api" path="/prediction/{id}" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250622%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250622T113955Z&X-Amz-Expires=172800&X-Amz-Signature=ebc051053087decf7af1fc246ba6a1cffc92d068f8cb5f3a2f0deef9b8b229c4&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/tools.md b/fr/api-reference/tools.md new file mode 100644 index 00000000..e1d6f401 --- /dev/null +++ b/fr/api-reference/tools.md @@ -0,0 +1,21 @@ +# Tools + +{% openapi-operation spec="flowiseai-api" path="/tools" method="post" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/tools" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/tools/{id}" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/tools/{id}" method="put" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/tools/{id}" method="delete" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/upsert-history.md b/fr/api-reference/upsert-history.md new file mode 100644 index 00000000..5d1954f2 --- /dev/null +++ b/fr/api-reference/upsert-history.md @@ -0,0 +1,9 @@ +# Upsert History + +{% openapi-operation spec="flowiseai-api" path="/upsert-history/{id}" method="get" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/upsert-history/{id}" method="patch" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/variables.md b/fr/api-reference/variables.md new file mode 100644 index 00000000..b963ea18 --- /dev/null +++ b/fr/api-reference/variables.md @@ -0,0 +1,17 @@ +# Variables + +{% openapi-operation spec="flowiseai-api" path="/variables" method="post" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/variables" method="get" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/variables/{id}" method="put" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} + +{% openapi-operation spec="flowiseai-api" path="/variables/{id}" method="delete" %} +[OpenAPI 
flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250717%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250717T144327Z&X-Amz-Expires=172800&X-Amz-Signature=b4e9a97d35bf5ed41bad9cb6ac39ae59371100f618dc64f1d01f6adf1f6419a3&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/api-reference/vector-upsert.md b/fr/api-reference/vector-upsert.md new file mode 100644 index 00000000..a037f851 --- /dev/null +++ b/fr/api-reference/vector-upsert.md @@ -0,0 +1,5 @@ +# Vector Upsert + +{% openapi-operation spec="flowiseai-api" path="/vector/upsert/{id}" method="post" %} +[OpenAPI flowiseai-api](https://gitbook-x-prod-openapi.4401d86825a13bf607936cc3a9f3897a.r2.cloudflarestorage.com/raw/238edaa326f0ca4057047699b251f09b359ab0c2a5d5201f7095b100792cf411.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=dce48141f43c0191a2ad043a6888781c%2F20250622%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250622T114123Z&X-Amz-Expires=172800&X-Amz-Signature=5dd4b3343a1c2a841e205191e3862f26b17fd1af871cb148ed64f6c7ad8048f1&X-Amz-SignedHeaders=host&x-amz-checksum-mode=ENABLED&x-id=GetObject) +{% endopenapi-operation %} diff --git a/fr/cli-reference/README.md b/fr/cli-reference/README.md new file mode 100644 index 00000000..6bcdf65e --- /dev/null +++ b/fr/cli-reference/README.md @@ -0,0 +1,5 @@ +# Référence CLI + +Avec Flowise CLI, vous pouvez exécuter de manière programmatique de nombreuses tâches identiques à celles que vous pouvez effectuer dans l'interface graphique. Cette section présente l'interface de ligne de commande Flowise. 
+ +- [Utilisateur](user.md) \ No newline at end of file diff --git a/fr/cli-reference/user.md b/fr/cli-reference/user.md new file mode 100644 index 00000000..22fff1d0 --- /dev/null +++ b/fr/cli-reference/user.md @@ -0,0 +1,58 @@ +# Utilisateur + +## Lister les e-mails des utilisateurs + +Cette commande vous permet de lister tous les e-mails des utilisateurs enregistrés dans le système. + +### Utilisation locale + +```bash +pnpm user +``` + +Ou si vous utilisez npm + +```bash +npx flowise user +``` + +### Utilisation de Docker + +Si vous exécutez Flowise dans un conteneur Docker, utilisez la commande suivante : + +```bash +docker exec -it FLOWISE_CONTAINER_NAME pnpm user +``` + +Remplacez `FLOWISE_CONTAINER_NAME` par le nom réel de votre conteneur Flowise. + +## Réinitialiser le mot de passe de l'utilisateur + +Cette commande vous permet de réinitialiser le mot de passe d'un utilisateur. + +### Utilisation locale + +```bash +pnpm user --email "admin@admin.com" --password "myPassword1!" +``` + +Ou si vous utilisez npm + +```bash +npx flowise user --email "admin@admin.com" --password "myPassword1!" +``` + +### Utilisation de Docker + +Si vous exécutez Flowise dans un conteneur Docker, utilisez la commande suivante : + +```bash +docker exec -it FLOWISE_CONTAINER_NAME pnpm user --email "admin@admin.com" --password "myPassword1!" +``` + +Remplacez `FLOWISE_CONTAINER_NAME` par le nom de votre conteneur Flowise. 
+ +### Paramètres + +* `--email` : L'adresse e-mail de l'utilisateur dont vous souhaitez réinitialiser le mot de passe +* `--password` : Le nouveau mot de passe à définir pour l'utilisateur diff --git a/fr/configuration/README.md b/fr/configuration/README.md new file mode 100644 index 00000000..cca04d4f --- /dev/null +++ b/fr/configuration/README.md @@ -0,0 +1,23 @@ +--- +description: Apprenez à configurer et à exécuter des instances Flowise +--- + +# Configuration + +*** + +Cette section vous guidera à travers diverses options de configuration pour personnaliser vos instances Flowise pour les environnements de développement, de test et de production. + +Nous fournirons également des guides détaillés pour déployer Flowise sur différentes options de Plateforme en tant que Service (PaaS), garantissant un déploiement fluide et réussi. + +## Guides + +* [Auth](authorization/) +* [Databases](databases.md) +* [Deployment](deployment/) +* [Environment Variables](environment-variables.md) +* [Rate Limit](rate-limit.md) +* [Proxy](running-flowise-behind-company-proxy.md) +* [SSO](sso.md) +* [Queue Mode](running-flowise-using-queue.md) +* [Production Ready](running-in-production.md) \ No newline at end of file diff --git a/fr/configuration/authorization/README.md b/fr/configuration/authorization/README.md new file mode 100644 index 00000000..dfb7cc62 --- /dev/null +++ b/fr/configuration/authorization/README.md @@ -0,0 +1,16 @@ +--- +description: Apprenez à sécuriser vos instances Flowise +--- + +# Auth + +*** + +Cette section vous guide dans la configuration de la sécurité avec Flowise, en mettant l'accent sur les mécanismes d'authentification au niveau de l'application et des chatflows. + +En mettant en œuvre une authentification robuste, vous pouvez protéger vos instances Flowise et garantir que seuls les utilisateurs autorisés peuvent accéder à vos chatflows et interagir avec eux. 
+ +## Méthodes prises en charge + +* [Niveau application](app-level.md) +* [Niveau chatflow](chatflow-level.md) \ No newline at end of file diff --git a/fr/configuration/authorization/app-level.md b/fr/configuration/authorization/app-level.md new file mode 100644 index 00000000..121d4f16 --- /dev/null +++ b/fr/configuration/authorization/app-level.md @@ -0,0 +1,123 @@ +--- +description: Apprenez à configurer le contrôle d'accès au niveau de l'application pour vos instances Flowise +--- + +# Application + +*** + +## Email & Mot de passe + +À partir de la version v3.0.1, une nouvelle méthode d'authentification a été introduite. Flowise utilise un [**système d'authentification basé sur Passport.js**](https://www.passportjs.org/) avec des tokens JWT stockés dans des cookies sécurisés HTTP-only. Lorsqu'un utilisateur se connecte, le système valide son email/mot de passe contre la base de données en utilisant une comparaison de hachage bcrypt, puis génère deux tokens JWT : un token d'accès à durée limitée (par défaut 60 minutes) et un token de rafraîchissement à longue durée (par défaut 90 jours). Ces tokens sont stockés en tant que cookies sécurisés. Pour les requêtes suivantes, le système extrait le JWT des cookies, valide la signature et les revendications en utilisant la stratégie JWT de Passport, et vérifie que la session utilisateur existe toujours. Le système prend également en charge le rafraîchissement automatique des tokens lorsque le token d'accès expire, et maintient les sessions en utilisant soit Redis, soit le stockage de base de données selon la configuration. + +Pour les utilisateurs existants qui utilisaient [Nom d'utilisateur & Mot de passe (Obsolète)](app-level.md#username-and-password-deprecated), vous devez configurer un nouveau compte administrateur. 
Pour éviter les revendications de propriété non autorisées, vous devez d'abord vous authentifier en utilisant le nom d'utilisateur et le mot de passe existants configurés comme `FLOWISE_USERNAME` et `FLOWISE_PASSWORD`. + +
+ +Les variables d'environnement suivantes peuvent être modifiées : + +### URL de l'application + +* `APP_URL` - L'URL de votre application Flowise hébergée. Par défaut `http://localhost:3000` + +### Configuration des variables d'environnement JWT + +Pour configurer les paramètres d'authentification JWT de Flowise, l'utilisateur peut modifier les variables d'environnement suivantes : + +* `JWT_AUTH_TOKEN_SECRET` - La clé secrète pour signer les tokens d'accès +* `JWT_REFRESH_TOKEN_SECRET` - Secret pour les tokens de rafraîchissement (par défaut, utilise le secret du token d'authentification s'il n'est pas défini) +* `JWT_TOKEN_EXPIRY_IN_MINUTES` - Durée de vie du token d'accès (par défaut : 60 minutes) +* `JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES` - Durée de vie du token de rafraîchissement (par défaut : 129 600 minutes ou 90 jours) +* `JWT_AUDIENCE` - Revendication d'audience pour la validation du token (par défaut : 'AUDIENCE') +* `JWT_ISSUER` - Revendication d'émetteur pour la validation du token (par défaut : 'ISSUER') +* `EXPRESS_SESSION_SECRET` - Secret de chiffrement de session (par défaut : 'flowise') +* `EXPIRE_AUTH_TOKENS_ON_RESTART` - Défini sur 'true' pour invalider tous les tokens au redémarrage du serveur (utile pour le développement) + +### Configuration SMTP pour les emails + +Configurez ces variables pour activer la fonctionnalité d'email pour les réinitialisations de mot de passe et les notifications : + +* `SMTP_HOST` - Le nom d'hôte de votre serveur SMTP (par exemple, `smtp.gmail.com`, `smtp.host.com`) +* `SMTP_PORT` - Le numéro de port pour la connexion SMTP (valeurs courantes : `587` pour TLS, `465` pour SSL, `25` pour non chiffré) +* `SMTP_USER` - Nom d'utilisateur pour l'authentification SMTP (généralement votre adresse email) +* `SMTP_PASSWORD` - Mot de passe ou mot de passe spécifique à l'application pour l'authentification SMTP +* `SMTP_SECURE` - Défini sur `true` pour le chiffrement SSL/TLS, `false` pour les connexions non chiffrées +* 
`ALLOW_UNAUTHORIZED_CERTS` - Défini sur `true` pour autoriser les certificats auto-signés (non recommandé pour la production) +* `SENDER_EMAIL` - L'adresse email "de" qui apparaîtra sur les emails sortants + +### Configuration de la sécurité et des tokens + +Ces variables contrôlent la sécurité de l'authentification, l'expiration des tokens et le hachage des mots de passe : + +* `PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS` - Temps d'expiration pour les tokens de réinitialisation de mot de passe (par défaut : 15 minutes) +* `PASSWORD_SALT_HASH_ROUNDS` - Nombre de tours de sel bcrypt pour le hachage des mots de passe (par défaut : 10, plus élevé = plus sécurisé mais plus lent) +* `TOKEN_HASH_SECRET` - Clé secrète utilisée pour hacher les tokens et les données sensibles (utilisez une chaîne forte et aléatoire) + +### Meilleures pratiques de sécurité + +* Utilisez des valeurs fortes et uniques pour `TOKEN_HASH_SECRET` et stockez-les en toute sécurité +* Pour la production, utilisez `SMTP_SECURE=true` et `ALLOW_UNAUTHORIZED_CERTS=false` +* Définissez des temps d'expiration de token appropriés en fonction de vos exigences de sécurité +* Utilisez des valeurs plus élevées pour `PASSWORD_SALT_HASH_ROUNDS` (12-15) pour une meilleure sécurité en production + +## Nom d'utilisateur et mot de passe (Obsolète) + +L'autorisation au niveau de l'application protège votre instance Flowise par un nom d'utilisateur et un mot de passe. Cela empêche vos applications d'être accessibles par quiconque lorsqu'elles sont déployées en ligne. + +
+ +### Comment définir un nom d'utilisateur et un mot de passe + +#### Npm + +1. Installez Flowise + +```bash +npm install -g flowise +``` + +2. Démarrer Flowise avec nom d'utilisateur et mot de passe + +```bash +npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234 +``` + +3. Ouvrez [http://localhost:3000](http://localhost:3000) + +#### Docker + +1. Accédez au dossier `docker` + +``` +cd docker +``` + +2. Créez un fichier `.env` et spécifiez le `PORT`, le `FLOWISE_USERNAME` et le `FLOWISE_PASSWORD` + +```sh +PORT=3000 +FLOWISE_USERNAME=user +FLOWISE_PASSWORD=1234 +``` + +3. Passez `FLOWISE_USERNAME` et `FLOWISE_PASSWORD` au fichier `docker-compose.yml` : + +``` +environment: + - PORT=${PORT} + - FLOWISE_USERNAME=${FLOWISE_USERNAME} + - FLOWISE_PASSWORD=${FLOWISE_PASSWORD} +``` + +4. `docker compose up -d` +5. Ouvrez [http://localhost:3000](http://localhost:3000) +6. Vous pouvez arrêter les conteneurs avec `docker compose stop` + +#### Clonage Git + +Pour activer l'authentification au niveau de l'application, ajoutez `FLOWISE_USERNAME` et `FLOWISE_PASSWORD` au fichier `.env` dans `packages/server` : + +``` +FLOWISE_USERNAME=user +FLOWISE_PASSWORD=1234 +``` diff --git a/fr/configuration/authorization/chatflow-level.md b/fr/configuration/authorization/chatflow-level.md new file mode 100644 index 00000000..5083a11d --- /dev/null +++ b/fr/configuration/authorization/chatflow-level.md @@ -0,0 +1,34 @@ +--- +description: Apprenez à configurer le contrôle d'accès au niveau du chatflow pour vos instances Flowise +--- + +# Flows + +*** + +Après avoir construit un chatflow / agentflow, par défaut, votre flux est accessible au public. Quiconque ayant accès à l'ID du Chatflow peut exécuter des prédictions via Embed ou API. + +Dans les cas où vous souhaitez permettre à certaines personnes d'accéder et d'interagir avec celui-ci, vous pouvez le faire en attribuant une clé API pour ce chatflow spécifique. 
+ +## Clé API + +Dans le tableau de bord, accédez à la section Clés API, et vous devriez voir une DefaultKey créée. Vous pouvez également ajouter ou supprimer des clés. + +
+ +## Chatflow + +Accédez au chatflow, et maintenant vous pouvez sélectionner la clé API que vous souhaitez utiliser pour protéger le chatflow. + +
+ +Après avoir attribué une clé API, on ne peut accéder à l'API du chatflow que lorsque l'en-tête Authorization est fourni avec la clé API correcte spécifiée lors d'un appel HTTP. + +```json +"Authorization": "Bearer " +``` + +Un exemple d'appel de l'API en utilisant POSTMAN + +
+ diff --git a/fr/configuration/databases.md b/fr/configuration/databases.md new file mode 100644 index 00000000..83516e18 --- /dev/null +++ b/fr/configuration/databases.md @@ -0,0 +1,136 @@ +--- +description: Apprenez à connecter votre instance Flowise à une base de données +--- + +# Bases de données + +--- + +## Configuration + +Flowise prend en charge 4 types de bases de données : + +- SQLite +- MySQL +- PostgreSQL +- MariaDB + +### SQLite (Par défaut) + +SQLite sera la base de données par défaut. Ces bases de données peuvent être configurées avec les variables d'environnement suivantes : + +```sh +DATABASE_TYPE=sqlite +DATABASE_PATH=/root/.flowise #your preferred location +``` + +Un fichier `database.sqlite` sera créé et enregistré dans le chemin spécifié par `DATABASE_PATH`. Si aucun chemin n'est spécifié, le chemin de stockage par défaut sera dans votre répertoire personnel -> .flowise + +**Remarque :** Si aucune des variables d'environnement n'est spécifiée, SQLite sera le choix de base de données par défaut. + +### MySQL + +```sh +DATABASE_TYPE=mysql +DATABASE_PORT=3306 +DATABASE_HOST=localhost +DATABASE_NAME=flowise +DATABASE_USER=user +DATABASE_PASSWORD=123 +``` + +### PostgreSQL + +```sh +DATABASE_TYPE=postgres +DATABASE_PORT=5432 +DATABASE_HOST=localhost +DATABASE_NAME=flowise +DATABASE_USER=user +DATABASE_PASSWORD=123 +PGSSLMODE=require +``` + +### MariaDB + +```bash +DATABASE_TYPE="mariadb" +DATABASE_PORT="3306" +DATABASE_HOST="localhost" +DATABASE_NAME="flowise" +DATABASE_USER="flowise" +DATABASE_PASSWORD="mypassword" +``` + +### Comment utiliser les bases de données Flowise SQLite et MySQL/MariaDB + +{% embed url="https://youtu.be/R-6uV1Cb8I8" %} + +## Sauvegarde + +1. Fermez l'application FlowiseAI. +2. Assurez-vous que la connexion à la base de données avec d'autres applications est désactivée. +3. Sauvegardez votre base de données. +4. Testez la base de données de sauvegarde. + +### SQLite + +1. Renommez le nom du fichier. 
+ + Windows: + + ```bash + rename "DATABASE_PATH\database.sqlite" "DATABASE_PATH\BACKUP_FILE_NAME.sqlite" + ``` + +Linux: + + ```bash + mv DATABASE_PATH/database.sqlite DATABASE_PATH/BACKUP_FILE_NAME.sqlite + ``` + +2. Sauvegarder la base de données. + + Windows: + + ```bash + copy DATABASE_PATH\BACKUP_FILE_NAME.sqlite DATABASE_PATH\database.sqlite + ``` + +Linux: + + ```bash + cp DATABASE_PATH/BACKUP_FILE_NAME.sqlite DATABASE_PATH/database.sqlite + ``` + +3. Tester la base de données de sauvegarde en exécutant Flowise. + +### PostgreSQL + +1. Sauvegarder la base de données. + + ```bash + pg_dump -U USERNAME -h HOST -p PORT -d DATABASE_NAME -f /PATH/TO/BACKUP_FILE_NAME.sql + ``` + +2. Entrez le mot de passe de la base de données. +3. Créez une base de données de test. + ```bash + psql -U USERNAME -h HOST -p PORT -d TEST_DATABASE_NAME -f /PATH/TO/BACKUP_FILE_NAME.sql + ``` +4. Testez la base de données de sauvegarde en exécutant Flowise avec le fichier `.env` modifié pour pointer vers la base de données de sauvegarde. + +### MySQL & MariaDB + +1. Base de données de sauvegarde. + + ```bash + mysqldump -u USERNAME -p DATABASE_NAME > BACKUP_FILE_NAME.sql + ``` + +2. Entrez le mot de passe de la base de données. +3. Créez une base de données de test. + ```bash + mysql -u USERNAME -p TEST_DATABASE_NAME < BACKUP_FILE_NAME.sql + ``` +4. Testez la base de données de sauvegarde en exécutant Flowise avec le fichier `.env` modifié pour pointer vers la base de données de sauvegarde. 
\ No newline at end of file diff --git a/fr/configuration/deployment/README.md b/fr/configuration/deployment/README.md new file mode 100644 index 00000000..ff3ffb65 --- /dev/null +++ b/fr/configuration/deployment/README.md @@ -0,0 +1,40 @@ +--- +description: Apprenez à déployer Flowise dans le cloud +--- + +# Déploiement + +*** + +Flowise est conçu avec une architecture indépendante de la plateforme, garantissant une compatibilité avec une large gamme d'environnements de déploiement pour répondre à vos besoins d'infrastructure. + +## Machine locale + +Pour déployer Flowise localement, suivez notre guide [Commencer](../../getting-started/). + +## Fournisseurs de cloud modernes + +Les plateformes cloud modernes privilégient l'automatisation et se concentrent sur les flux de travail des développeurs, simplifiant ainsi la gestion du cloud et la maintenance continue. + +Cela réduit l'expertise technique nécessaire, mais peut limiter le niveau de personnalisation que vous avez sur l'infrastructure sous-jacente. + +* [Elestio](https://elest.io/open-source/flowiseai) +* [Hugging Face](hugging-face.md) +* [Railway](railway.md) +* [Render](render.md) +* [Replit](replit.md) +* [RepoCloud](https://repocloud.io/details/?app\_id=29) +* [Sealos](sealos.md) +* [Zeabur](zeabur.md) + +## Fournisseurs de cloud établis + +Les fournisseurs de cloud établis, en revanche, nécessitent un niveau d'expertise technique plus élevé pour gérer et optimiser selon vos besoins spécifiques. + +Cette complexité, cependant, offre également une plus grande flexibilité et un meilleur contrôle sur votre environnement cloud. 
+ +* [AWS](aws.md) +* [Azure](azure.md) +* [DigitalOcean](digital-ocean.md) +* [GCP](gcp.md) +* [Kubernetes avec Helm](https://artifacthub.io/packages/helm/cowboysysop/flowise) \ No newline at end of file diff --git a/fr/configuration/deployment/aws.md b/fr/configuration/deployment/aws.md new file mode 100644 index 00000000..2d57dc61 --- /dev/null +++ b/fr/configuration/deployment/aws.md @@ -0,0 +1,336 @@ +--- +description: Apprenez à déployer Flowise sur AWS +--- + +# AWS + +*** + +## Prérequis + +Cela nécessite une compréhension de base du fonctionnement d'AWS. + +Deux options sont disponibles pour déployer Flowise sur AWS : + +* [Déployer sur ECS en utilisant CloudFormation](aws.md#deploy-on-ecs-using-cloudformation) +* [Configurer manuellement une instance EC2](aws.md#launch-ec2-instance) + +## Déployer sur ECS en utilisant CloudFormation + +Le modèle CloudFormation est disponible ici : [https://gist.github.com/MrHertal/549b31a18e350b69c7200ae8d26ed691](https://gist.github.com/MrHertal/549b31a18e350b69c7200ae8d26ed691) + +Il déploie Flowise sur un cluster ECS exposé via ELB. + +Il a été inspiré par cette architecture de référence : [https://github.com/aws-samples/ecs-refarch-cloudformation](https://github.com/aws-samples/ecs-refarch-cloudformation) + +N'hésitez pas à modifier ce modèle pour adapter des éléments tels que la version de l'image Flowise, les variables d'environnement, etc. + +Exemple de commande pour déployer Flowise en utilisant le [CLI AWS](https://aws.amazon.com/fr/cli/) : + +```bash +aws cloudformation create-stack --stack-name flowise --template-body file://flowise-cloudformation.yml --capabilities CAPABILITY_IAM +``` + +Après le déploiement, l'URL de votre application Flowise est disponible dans les sorties de la pile CloudFormation. 
+ +## Déployer sur ECS en utilisant Terraform + +Les fichiers Terraform (`variables.tf`, `main.tf`) sont disponibles dans ce dépôt GitHub : [terraform-flowise-setup](https://github.com/huiseo/terraform-flowise-setup/tree/main). + +Cette configuration déploie Flowise sur un cluster ECS exposé via un Application Load Balancer (ALB). Elle est basée sur les meilleures pratiques AWS pour les déploiements ECS. + +Vous pouvez modifier le modèle Terraform pour ajuster : + +* La version de l'image Flowise +* Les variables d'environnement +* Les configurations des ressources (CPU, mémoire, etc.) + +### Exemples de commandes pour le déploiement : + +1. **Initialiser Terraform :** + +```bash +terraform init +terraform apply +terraform destroy +``` +```markdown +## Lancer une instance EC2 + +1. Dans le tableau de bord EC2, cliquez sur **Lancer une instance** + +
+ +2. Faites défiler vers le bas et **Créez une nouvelle paire de clés** si vous n'en avez pas + +
+ +3. Remplissez le nom de la paire de clés de votre choix. Pour Windows, nous utiliserons `.ppk` et PuTTY pour nous connecter à l'instance. Pour Mac et Linux, nous utiliserons `.pem` et OpenSSH + +
+ +4. Cliquez sur **Créer une paire de clés** et sélectionnez un chemin d'emplacement pour enregistrer le fichier `.ppk` +5. Ouvrez la barre latérale gauche et ouvrez un nouvel onglet à partir de **Groupes de sécurité**. Ensuite, **Créez un groupe de sécurité** + +
+ +6. Remplissez le nom et la description de votre groupe de sécurité. Ensuite, ajoutez ce qui suit aux Règles entrantes et **Créez un groupe de sécurité** + +
+ +7. Retournez au premier onglet (Lancer une instance EC2) et faites défiler vers le bas jusqu'à **Paramètres réseau**. Sélectionnez le groupe de sécurité que vous venez de créer + +
+ +8. Cliquez sur **Lancer l'instance**. Revenez au tableau de bord EC2, après quelques minutes, nous devrions voir une nouvelle instance opérationnelle [🎉](https://emojipedia.org/party-popper/) + +
+ +## Comment se connecter à votre instance (Windows) + +1. Pour Windows, nous allons utiliser PuTTY. Vous pouvez le télécharger [ici](https://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html). +2. Ouvrez PuTTY et remplissez le **HostName** avec le nom DNS IPv4 public de votre instance + +
+ +3. Dans la barre latérale gauche de la configuration de PuTTY, développez **SSH** et cliquez sur **Auth**. Cliquez sur Parcourir et sélectionnez le fichier `.ppk` que vous avez téléchargé précédemment. + +
+ +4. Cliquez sur **Ouvrir** et **Acceptez** le message contextuel + +
+ +5. Ensuite, connectez-vous en tant que `ec2-user` + +
+ +6. Vous êtes maintenant connecté à l'instance EC2 + +## Comment se connecter à votre instance (Mac et Linux) + +1. Ouvrez l'application Terminal sur votre Mac/Linux. +2. _(Optionnel)_ Modifiez les permissions du fichier de clé privée pour restreindre l'accès : +``````bash +chmod 400 /path/to/mykey.pem +``` + +3. Utilisez la commande `ssh` pour vous connecter à votre instance EC2, en spécifiant le nom d'utilisateur (`ec2-user`), le DNS public IPv4 et le chemin vers le fichier `.pem`. + +```bash +ssh -i /Users/username/Documents/mykey.pem ec2-user@ec2-123-45-678-910.compute-1.amazonaws.com +``` + +4. Appuyez sur Entrée, et si tout est configuré correctement, vous devriez établir avec succès une connexion SSH à votre instance EC2. + +## Installer Docker + +1. Appliquez les mises à jour en attente en utilisant la commande yum : + +```bash +sudo yum update +``` + +2. Recherchez le paquet Docker : + +```bash +sudo yum search docker +``` + +3. Obtenez des informations sur la version : + +```bash +sudo yum info docker +``` + +4. Installez Docker, exécutez : + +```bash +sudo yum install docker +``` + +5. Ajoutez l'appartenance au groupe pour l'utilisateur par défaut ec2-user afin de pouvoir exécuter toutes les commandes docker sans utiliser la commande sudo : + +```bash +sudo usermod -a -G docker ec2-user +id ec2-user +newgrp docker +``` + +6. Installer docker-compose : + +```bash +sudo yum install docker-compose-plugin +``` + +7. Activer le service docker au démarrage de l'AMI : + +```bash +sudo systemctl enable docker.service +``` + +8. Démarrez le service Docker : + +```bash +sudo systemctl start docker.service +``` + +## Installer Git + +```bash +sudo yum install git -y +``` + +## Configuration + +1. Clone le dépôt + +```bash +git clone https://github.com/FlowiseAI/Flowise.git +``` + +2. Accédez au dossier docker + +```bash +cd Flowise && cd docker +``` + +3. Créez un fichier `.env`. Vous pouvez utiliser votre éditeur préféré. 
J'utiliserai `nano` + +```bash +nano .env +``` + +
+ +4. Spécifiez les variables d'environnement : + +```sh +PORT=3000 +DATABASE_PATH=/root/.flowise +SECRETKEY_PATH=/root/.flowise +LOG_PATH=/root/.flowise/logs +BLOB_STORAGE_PATH=/root/.flowise/storage +``` + +5. Ensuite, appuyez sur `Ctrl + X` pour quitter, puis sur `Y` pour enregistrer le fichier +6. Exécutez docker compose + +```bash +docker compose up -d +``` + +7. Votre application est maintenant prête à l'adresse DNS IPv4 publique sur le port 3000 : + +``` +http://ec2-123-456-789.compute-1.amazonaws.com:3000 +``` + +8. Vous pouvez fermer l'application en : + +```bash +docker compose stop +``` + +9. Vous pouvez récupérer la dernière image en : + +```bash +docker pull flowiseai/flowise +``` +Voici la traduction en français : + +```markdown +Alternativement : +``````bash +docker-compose pull +docker-compose up --build -d +``` + +## Utilisation de NGINX + +Si vous souhaitez vous débarrasser du :3000 dans l'URL et avoir un domaine personnalisé, vous pouvez utiliser NGINX pour faire un reverse proxy du port 80 vers 3000. Ainsi, l'utilisateur pourra ouvrir l'application en utilisant votre domaine. Exemple : `http://yourdomain.com`. + +1. ```bash + sudo yum install nginx + ``` +2. ```bash + nginx -v + ``` +3.
sudo systemctl start nginx
+   
+4.
sudo nano /etc/nginx/conf.d/flowise.conf
+   
+5. Copiez-collez ce qui suit et modifiez-le avec votre domaine : + +```shell +server { + listen 80; + listen [::]:80; + server_name yourdomain.com; #Example: demo.flowiseai.com + location / { + proxy_pass http://localhost:3000; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} +``` + +press `Ctrl + X` pour quitter, et `Y` pour enregistrer le fichier + +6. ```bash + sudo systemctl restart nginx + ``` +7. Go to your DNS provider, and add a new A record. Name will be your domain name, and value will be the Public IPv4 address from EC2 instance + +
+ +8. Vous devriez maintenant pouvoir ouvrir l'application : `http://yourdomain.com`. + +### Installer Certbot pour avoir HTTPS + +Si vous souhaitez que votre application utilise `https://yourdomain.com`, voici comment faire : + +1. Pour installer Certbot et activer HTTPS sur NGINX, nous allons nous appuyer sur Python. Tout d'abord, configurons un environnement virtuel : + +```bash +sudo python3 -m venv /opt/certbot/ +sudo /opt/certbot/bin/pip install --upgrade pip +``` + +2. Ensuite, exécutez cette commande pour installer Certbot : + +```bash +sudo /opt/certbot/bin/pip install certbot certbot-nginx +``` + +3. Maintenant, exécutez la commande suivante pour vous assurer que la commande `certbot` peut être exécutée : + +```bash +sudo ln -s /opt/certbot/bin/certbot /usr/bin/certbot +``` + +4. Enfin, exécutez la commande suivante pour obtenir un certificat et permettre à Certbot de modifier automatiquement la configuration NGINX, activant ainsi HTTPS : + +```bash +sudo certbot --nginx +``` + +5. Après avoir suivi l'assistant de génération de certificat, nous pourrons accéder à notre instance EC2 via HTTPS en utilisant l'adresse `https://yourdomain.com` + +## Configurer le renouvellement automatique + +Pour permettre à Certbot de renouveler automatiquement les certificats, il suffit d'ajouter une tâche cron en exécutant la commande suivante : + +```bash +echo "0 0,12 * * * root /opt/certbot/bin/python -c 'import random; import time; time.sleep(random.random() * 3600)' && sudo certbot renew -q" | sudo tee -a /etc/crontab > /dev/null +``` + +## Félicitations ! 
+ +Vous avez réussi à configurer les applications Flowise sur une instance EC2 avec un certificat SSL sur votre domaine[🥳](https://emojipedia.org/partying-face/) diff --git a/fr/configuration/deployment/azure.md b/fr/configuration/deployment/azure.md new file mode 100644 index 00000000..9dc80cf9 --- /dev/null +++ b/fr/configuration/deployment/azure.md @@ -0,0 +1,601 @@ +--- +description: Apprenez à déployer Flowise sur Azure +--- + +# Azure + +*** + +## Flowise en tant que service d'application Azure avec Postgres : Utilisation de Terraform + +### Prérequis + +1. **Compte Azure** : Assurez-vous d'avoir un compte Azure avec un abonnement actif. Si vous n'en avez pas, inscrivez-vous sur [Azure Portal](https://portal.azure.com/). +2. **Terraform** : Installez Terraform CLI sur votre machine. Téléchargez-le depuis [le site de Terraform](https://www.terraform.io/downloads.html). +3. **Azure CLI** : Installez Azure CLI. Les instructions se trouvent sur la [page de documentation d'Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli). + +### Configuration de votre environnement + +1. **Connexion à Azure** : Ouvrez votre terminal ou votre invite de commande et connectez-vous à Azure CLI en utilisant : + +```bash +az login --tenant --use-device-code +``` + +Suivez les instructions pour compléter le processus de connexion. + +2. **Configurer l'abonnement** : Après vous être connecté, configurez l'abonnement Azure en utilisant : + +```bash +az account set --subscription +``` + +3. 
**Initialiser Terraform** : + +Créez un fichier `terraform.tfvars` dans votre répertoire de projet Terraform, s'il n'est pas déjà présent, et ajoutez le contenu suivant : + +```hcl +subscription_name = "subscrpiton_name" +subscription_id = "subscription id" +project_name = "webapp_name" +db_username = "PostgresUserName" +db_password = "strongPostgresPassword" +flowise_secretkey_overwrite = "longandStrongSecretKey" +webapp_ip_rules = [ + { + name = "AllowedIP" + ip_address = "X.X.X.X/32" + headers = null + virtual_network_subnet_id = null + subnet_id = null + service_tag = null + priority = 300 + action = "Allow" + } +] +postgres_ip_rules = { + "ValbyOfficeIP" = "X.X.X.X" + // Add more key-value pairs as needed +} +source_image = "flowiseai/flowise:latest" +tagged_image = "flow:v1" +``` + +Remplacez les espaces réservés par des valeurs réelles pour votre configuration. + +La structure de l'arborescence des fichiers est la suivante : + +``` +flow +├── database.tf +├── main.tf +├── network.tf +├── output.tf +├── providers.tf +├── terraform.tfvars +├── terraform.tfvars.example +├── variables.tf +├── webapp.tf +├── .gitignore // ignore your .tfvars and .lock.hcf, .terraform + +``` + +Chaque fichier `.tf` dans la configuration Terraform contient probablement un aspect différent de l'infrastructure en tant que code : + +
+ +`database.tf` définirait la configuration pour la base de données Postgres. + +```yaml + +// database.tf + +// Database instance +resource "azurerm_postgresql_flexible_server" "postgres" { + name = "postgresql-${var.project_name}" + location = azurerm_resource_group.rg.location + resource_group_name = azurerm_resource_group.rg.name + sku_name = "GP_Standard_D2s_v3" + storage_mb = 32768 + version = "11" + delegated_subnet_id = azurerm_subnet.dbsubnet.id + private_dns_zone_id = azurerm_private_dns_zone.postgres.id + backup_retention_days = 7 + geo_redundant_backup_enabled = false + auto_grow_enabled = false + administrator_login = var.db_username + administrator_password = var.db_password + zone = "2" + + lifecycle { + prevent_destroy = false + } +} + +// Firewall +resource "azurerm_postgresql_flexible_server_firewall_rule" "pg_firewall" { + for_each = var.postgres_ip_rules + name = each.key + server_id = azurerm_postgresql_flexible_server.postgres.id + start_ip_address = each.value + end_ip_address = each.value +} + +// Database +resource "azurerm_postgresql_flexible_server_database" "production" { + name = "production" + server_id = azurerm_postgresql_flexible_server.postgres.id + charset = "UTF8" + collation = "en_US.utf8" + + # prevent the possibility of accidental data loss + lifecycle { + prevent_destroy = false + } +} + +// Transport off +resource "azurerm_postgresql_flexible_server_configuration" "postgres_config" { + name = "require_secure_transport" + server_id = azurerm_postgresql_flexible_server.postgres.id + value = "off" +} +``` + +
+ +
+ +Le fichier `main.tf` pourrait être le fichier de configuration principal qui inclut la configuration du fournisseur Azure et définit le groupe de ressources Azure. + +```yaml +// main.tf +resource "random_string" "resource_code" { + length = 5 + special = false + upper = false +} + +// resource group +resource "azurerm_resource_group" "rg" { + location = var.resource_group_location + name = "rg-${var.project_name}" +} + +// Storage Account +resource "azurerm_storage_account" "sa" { + name = "${var.subscription_name}${random_string.resource_code.result}" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + account_tier = "Standard" + account_replication_type = "LRS" + + blob_properties { + versioning_enabled = true + } + +} + +// File share +resource "azurerm_storage_share" "flowise-share" { + name = "flowise" + storage_account_name = azurerm_storage_account.sa.name + quota = 50 +} + +``` + +
+ +
+ +`network.tf` inclurait des ressources réseau telles que des réseaux virtuels, des sous-réseaux et des groupes de sécurité réseau. + +```yaml +// network.tf + +// Vnet +resource "azurerm_virtual_network" "vnet" { + name = "vn-${var.project_name}" + location = azurerm_resource_group.rg.location + resource_group_name = azurerm_resource_group.rg.name + address_space = ["10.3.0.0/16"] +} + +resource "azurerm_subnet" "dbsubnet" { + name = "db-subnet-${var.project_name}" + resource_group_name = azurerm_resource_group.rg.name + virtual_network_name = azurerm_virtual_network.vnet.name + address_prefixes = ["10.3.1.0/24"] + private_endpoint_network_policies_enabled = true + delegation { + name = "delegation" + service_delegation { + name = "Microsoft.DBforPostgreSQL/flexibleServers" + } + } + lifecycle { + ignore_changes = [ + service_endpoints, + delegation + ] + } +} + +resource "azurerm_subnet" "webappsubnet" { + + name = "web-app-subnet-${var.project_name}" + resource_group_name = azurerm_resource_group.rg.name + virtual_network_name = azurerm_virtual_network.vnet.name + address_prefixes = ["10.3.8.0/24"] + + delegation { + name = "delegation" + service_delegation { + name = "Microsoft.Web/serverFarms" + } + } + lifecycle { + ignore_changes = [ + delegation + ] + } +} + +resource "azurerm_private_dns_zone" "postgres" { + name = "private.postgres.database.azure.com" + resource_group_name = azurerm_resource_group.rg.name +} + +resource "azurerm_private_dns_zone_virtual_network_link" "postgres" { + name = "private-postgres-vnet-link" + resource_group_name = azurerm_resource_group.rg.name + private_dns_zone_name = azurerm_private_dns_zone.postgres.name + virtual_network_id = azurerm_virtual_network.vnet.id +} + +``` + +
+ +
+ +Le fichier `providers.tf` définirait les fournisseurs Terraform, tels qu'Azure. + +```yaml +// providers.tf +terraform { + required_version = ">=0.12" + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "=3.87.0" + } + random = { + source = "hashicorp/random" + version = "~>3.0" + } + } +} + +provider "azurerm" { + subscription_id = var.subscription_id + features {} +} +``` + +
+ +
+ +`variables.tf` déclarera les variables utilisées dans tous les fichiers `.tf`. + +```yaml +// variables.tf +variable "resource_group_location" { + default = "westeurope" + description = "Location of the resource group." +} + +variable "container_rg_name" { + default = "acrllm" + description = "Name of container regrestry." +} + +variable "subscription_id" { + type = string + sensitive = true + description = "Service Subscription ID" +} + +variable "subscription_name" { + type = string + description = "Service Subscription Name" +} + + +variable "project_name" { + type = string + description = "Project Name" +} + +variable "db_username" { + type = string + description = "DB User Name" +} + +variable "db_password" { + type = string + sensitive = true + description = "DB Password" +} + +variable "flowise_secretkey_overwrite" { + type = string + sensitive = true + description = "Flowise secret key" +} + +variable "webapp_ip_rules" { + type = list(object({ + name = string + ip_address = string + headers = string + virtual_network_subnet_id = string + subnet_id = string + service_tag = string + priority = number + action = string + })) +} + +variable "postgres_ip_rules" { + description = "A map of IP addresses and their corresponding names for firewall rules" + type = map(string) + default = {} +} + +variable "flowise_image" { + type = string + description = "Flowise image from Docker Hub" +} + +variable "tagged_image" { + type = string + description = "Tag for flowise image version" +} +``` + +
+ +
+ +`webapp.tf` Services d'application Azure incluant un plan de service et une application web Linux + +```yaml +// webapp.tf +#Create the Linux App Service Plan +resource "azurerm_service_plan" "webappsp" { + name = "asp${var.project_name}" + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + os_type = "Linux" + sku_name = "P3v3" +} + +resource "azurerm_linux_web_app" "webapp" { + name = var.project_name + resource_group_name = azurerm_resource_group.rg.name + location = azurerm_resource_group.rg.location + service_plan_id = azurerm_service_plan.webappsp.id + + app_settings = { + DOCKER_ENABLE_CI = true + WEBSITES_CONTAINER_START_TIME_LIMIT = 1800 + WEBSITES_ENABLE_APP_SERVICE_STORAGE = false + DATABASE_TYPE = "postgres" + DATABASE_HOST = azurerm_postgresql_flexible_server.postgres.fqdn + DATABASE_NAME = azurerm_postgresql_flexible_server_database.production.name + DATABASE_USER = azurerm_postgresql_flexible_server.postgres.administrator_login + DATABASE_PASSWORD = azurerm_postgresql_flexible_server.postgres.administrator_password + DATABASE_PORT = 5432 + FLOWISE_SECRETKEY_OVERWRITE = var.flowise_secretkey_overwrite + PORT = 3000 + SECRETKEY_PATH = "/root" + DOCKER_IMAGE_TAG = var.tagged_image + } + + storage_account { + name = "${var.project_name}_mount" + access_key = azurerm_storage_account.sa.primary_access_key + account_name = azurerm_storage_account.sa.name + share_name = azurerm_storage_share.flowise-share.name + type = "AzureFiles" + mount_path = "/root" + } + + + https_only = true + + site_config { + always_on = true + vnet_route_all_enabled = true + dynamic "ip_restriction" { + for_each = var.webapp_ip_rules + content { + name = ip_restriction.value.name + ip_address = ip_restriction.value.ip_address + } + } + application_stack { + docker_image_name = var.flowise_image + docker_registry_url = "https://${azurerm_container_registry.acr.login_server}" + docker_registry_username = 
azurerm_container_registry.acr.admin_username + docker_registry_password = azurerm_container_registry.acr.admin_password + } + } + + logs { + http_logs { + file_system { + retention_in_days = 7 + retention_in_mb = 35 + } + + } + } + + identity { + type = "SystemAssigned" + } + + lifecycle { + create_before_destroy = false + + ignore_changes = [ + virtual_network_subnet_id + ] + } + +} + +resource "azurerm_app_service_virtual_network_swift_connection" "webappvnetintegrationconnection" { + app_service_id = azurerm_linux_web_app.webapp.id + subnet_id = azurerm_subnet.webappsubnet.id + + depends_on = [azurerm_linux_web_app.webapp, azurerm_subnet.webappsubnet] +} + +``` + +
+ +Note : Le répertoire `.terraform` est créé par Terraform lors de l'initialisation d'un projet (`terraform init`) et il contient les plugins et fichiers binaires nécessaires au bon fonctionnement de Terraform. Le fichier `.terraform.lock.hcl` est utilisé pour enregistrer les versions exactes des fournisseurs qui sont utilisées afin d'assurer des installations cohérentes sur différentes machines. + +Naviguez vers votre répertoire de projet Terraform et exécutez : + +```bash +terraform init +``` + +Cela initialisera Terraform et téléchargera les fournisseurs requis. + +### Configurer les variables Terraform + +### Déployer avec Terraform + +1. **Planifier le déploiement** : Exécutez la commande Terraform plan pour voir quelles ressources seront créées : + +    ```bash +    terraform plan +    ``` +2. **Appliquer le déploiement** : Si vous êtes satisfait du plan, appliquez les modifications : + +    ```bash +    terraform apply +    ``` + +    Confirmez l'action lorsqu'on vous le demande, et Terraform commencera à créer les ressources. +3. **Vérifier le déploiement** : Une fois que Terraform a terminé, il affichera toutes les sorties définies telles que les adresses IP ou les noms de domaine. Vérifiez que les ressources sont correctement déployées dans votre portail Azure. + +*** + +## Instance de Conteneur Azure : Utilisation de l'interface utilisateur du portail Azure ou de l'Azure CLI + +### Prérequis + +1. _(Optionnel)_ [Installer l'Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) si vous souhaitez suivre les commandes basées sur la CLI + +## Créer une instance de conteneur sans stockage persistant + +Sans stockage persistant, vos données sont conservées en mémoire. Cela signifie qu'après un redémarrage du conteneur, toutes les données que vous avez stockées disparaîtront. + +### Dans le portail + +1. Recherchez les instances de conteneur dans le Marketplace et cliquez sur Créer : + +<div align="left">

Entrée des instances de conteneur dans le Marketplace d'Azure

+ +2. Sélectionnez ou créez un groupe de ressources, un nom de conteneur, une région, une source d'image `Autre registre`, un type d'image, l'image `flowiseai/flowise`, le type de système d'exploitation et la taille. Cliquez ensuite sur "Suivant : Réseau" pour configurer les ports Flowise : + +

Première page de l'assistant de création d'instance de conteneur

+ +3. Ajoutez un nouveau port `3000 (TCP)` à côté du port par défaut `80 (TCP)`. Sélectionnez ensuite "Suivant : Avancé" : + +

Deuxième page de l'assistant de création d'instance de conteneur. Elle demande le type de réseau et les ports.

+ +4. Définissez la politique de redémarrage sur `En cas d'échec`. Ajoutez l'override de commande `["/bin/sh", "-c", "flowise start"]`. Cliquez enfin sur "Vérifier + créer" : + +

Troisième page de l'assistant de création d'instance de conteneur. Elle demande la politique de redémarrage, les variables d'environnement et la commande à exécuter au démarrage du conteneur.

+ +5. Vérifiez les paramètres finaux et cliquez sur "Créer" : + +

Page de révision finale et de création pour une instance de conteneur.

+ +6. Une fois la création terminée, cliquez sur "Accéder à la ressource" + +

Page de résultat de création de ressource dans Azure.

+ +7. Visitez votre instance Flowise en copiant l'adresse IP et en ajoutant :3000 comme port : + +

Page d'aperçu de l'instance de conteneur

+ +

Application Flowise déployée en tant qu'instance de conteneur

+ +### Créer en utilisant l'Azure CLI + +1. Créez un groupe de ressources (si vous n'en avez pas déjà un) + +```bash +az group create --name flowise-rg --location "West US" +``` + +2. Créez une instance de conteneur + +```bash +az container create -g flowise-rg \ + --name flowise \ + --image flowiseai/flowise \ + --command-line "/bin/sh -c 'flowise start'" \ + --ip-address public \ + --ports 80 3000 \ + --restart-policy OnFailure +``` + +3. Visitez l'adresse IP (y compris le port :3000) affichée dans la sortie de la commande ci-dessus. + +## Créer une instance de conteneur avec stockage persistant + +La création d'une instance de conteneur avec stockage persistant n'est possible que via la CLI : + +1. Créez un groupe de ressources (si vous n'en avez pas déjà un) + +```bash +az group create --name flowise-rg --location "West US" +``` +```markdown +2. Créez la ressource de compte de stockage (ou utilisez un compte existant) dans le groupe de ressources ci-dessus. Vous pouvez vérifier comment le faire [ici](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-portal?tabs=azure-portal). +3. Dans Azure Storage, créez un nouveau partage de fichiers. Vous pouvez vérifier comment le faire [ici](https://learn.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-portal?tabs=azure-portal). +4. 
Créez une instance de conteneur +``````bash +az container create -g flowise-rg \ + --name flowise \ + --image flowiseai/flowise \ + --command-line "/bin/sh -c 'flowise start'" \ + --environment-variables DATABASE_PATH=/opt/flowise/.flowise SECRETKEY_PATH=/opt/flowise/.flowise LOG_PATH=/opt/flowise/.flowise/logs BLOB_STORAGE_PATH=/opt/flowise/.flowise/storage \ + --ip-address public \ + --ports 80 3000 \ + --restart-policy OnFailure \ + --azure-file-volume-share-name here goes the name of your File share \ + --azure-file-volume-account-name here goes the name of your Storage Account \ + --azure-file-volume-account-key here goes the access key to your Storage Account \ + --azure-file-volume-mount-path /opt/flowise/.flowise +``` + +5. Visitez l'adresse IP (y compris le port :3000) affichée dans la sortie de la commande ci-dessus. +6. À partir de maintenant, vos données seront stockées dans une base de données SQLite que vous pouvez trouver dans votre partage de fichiers. + +Regardez le tutoriel vidéo sur le déploiement sur Azure Container Instance : + +{% embed url="https://www.youtube.com/watch?v=yDebxDfn2yk" %} diff --git a/fr/configuration/deployment/digital-ocean.md b/fr/configuration/deployment/digital-ocean.md new file mode 100644 index 00000000..d1bdfc66 --- /dev/null +++ b/fr/configuration/deployment/digital-ocean.md @@ -0,0 +1,281 @@ +--- +description: Apprenez à déployer Flowise sur Digital Ocean +--- + +# Digital Ocean + +*** + +## Créer un Droplet + +Dans cette section, nous allons créer un Droplet. Pour plus d'informations, consultez le [guide officiel](https://docs.digitalocean.com/products/droplets/quickstart/). + +1. Tout d'abord, cliquez sur **Droplets** dans le menu déroulant + +
+ +2. Sélectionnez la région de données et un type de Droplet Basique à 6 $/mois + +
+ +3. Sélectionnez la méthode d'authentification. Dans cet exemple, nous allons utiliser un mot de passe + +
+ +4. Après un moment, vous devriez voir votre Droplet créé avec succès + +
+ +## Comment se connecter à votre Droplet + +Pour Windows, suivez ce [guide](https://docs.digitalocean.com/products/droplets/how-to/connect-with-ssh/putty/). + +Pour Mac/Linux, suivez ce [guide](https://docs.digitalocean.com/products/droplets/how-to/connect-with-ssh/openssh/). + +## Installer Docker + +1. ``` + curl -fsSL https://get.docker.com -o get-docker.sh ``` +2. ``` + sudo sh get-docker.sh + ``` +3. Installer docker-compose : + +``` +sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose +``` + +4. Définir les autorisations : + +``` +sudo chmod +x /usr/local/bin/docker-compose +``` + +## Configuration + +1. Clone le dépôt + +``` +git clone https://github.com/FlowiseAI/Flowise.git +``` + +2. Accédez au dossier docker + +```bash +cd Flowise && cd docker +``` + +3. Créez un fichier `.env`. Vous pouvez utiliser votre éditeur préféré. J'utiliserai `nano` + +```bash +nano .env +``` + +
+ +4. Spécifiez les variables d'environnement : + +```sh +PORT=3000 +DATABASE_PATH=/root/.flowise +SECRETKEY_PATH=/root/.flowise +LOG_PATH=/root/.flowise/logs +BLOB_STORAGE_PATH=/root/.flowise/storage +``` + +5. Ensuite, appuyez sur `Ctrl + X` pour quitter, puis sur `Y` pour enregistrer le fichier +6. Exécutez docker compose + +```bash +docker compose up -d +``` + +7. Vous pouvez ensuite visualiser l'application : "Votre DNS IPv4 public" :3000. Exemple : `176.63.19.226:3000` +8. Vous pouvez arrêter l'application en : + +```bash +docker compose stop +``` + +9. Vous pouvez récupérer la dernière image en : + +```bash +docker pull flowiseai/flowise +``` + +## Ajout d'un Proxy Inverse & SSL + +Un proxy inverse est la méthode recommandée pour exposer un serveur d'application à Internet. Il nous permettra de nous connecter à notre droplet en utilisant uniquement une URL au lieu de l'adresse IP du serveur et du numéro de port. Cela offre des avantages en matière de sécurité en isolant le serveur d'application de l'accès direct à Internet, la possibilité de centraliser la protection du pare-feu, un plan d'attaque minimisé pour les menaces courantes telles que les attaques par déni de service, et surtout pour nos besoins, la capacité de terminer le chiffrement SSL/TLS à un seul endroit. + +> A lack of SSL on your Droplet will cause the embeddable widget and API endpoints to be inaccessible in modern browsers. This is because browsers have begun to deprecate HTTP in favor of HTTPS, and block HTTP requests from pages loaded over HTTPS. + +### Étape 1 — Installation de Nginx + +1. Nginx est disponible pour installation avec apt via les dépôts par défaut. Mettez à jour votre index de dépôts, puis installez Nginx : + +```bash +sudo apt update +sudo apt install nginx +``` + +> Press Y to confirm the installation. If you are asked to restart services, press ENTER to accept the defaults. + +```markdown +2. Vous devez autoriser l'accès à Nginx via votre pare-feu. 
Après avoir configuré votre serveur selon les prérequis initiaux, ajoutez la règle suivante avec ufw : +``` + +```bash +sudo ufw allow 'Nginx HTTP' +``` + +3. Maintenant, vous pouvez vérifier que Nginx fonctionne : + +```bash +systemctl status nginx +``` + +La sortie devrait ressembler à ceci : + +```bash +● nginx.service - A high performance web server and a reverse proxy server +     Loaded: loaded (/lib/systemd/system/nginx.service; enabled; vendor preset: enabled) +     Active: active (running) since Mon 2022-08-29 06:52:46 UTC; 39min ago +       Docs: man:nginx(8) +   Main PID: 9919 (nginx) +      Tasks: 2 (limit: 2327) +     Memory: 2.9M +        CPU: 50ms +     CGroup: /system.slice/nginx.service +             ├─9919 "nginx: master process /usr/sbin/nginx -g daemon on; master_process on;" +             └─9920 "nginx: worker process +``` + +Ensuite, vous ajouterez un bloc serveur personnalisé avec votre domaine et le proxy de votre serveur d'application. + +### Étape 2 — Configuration de votre bloc serveur + enregistrement DNS + +Il est recommandé de créer un fichier de configuration personnalisé pour vos nouvelles additions de bloc serveur, plutôt que de modifier directement la configuration par défaut. + +1. Créez et ouvrez un nouveau fichier de configuration Nginx en utilisant nano ou votre éditeur de texte préféré : + +```bash +sudo nano /etc/nginx/sites-available/your_domain +``` + +2. Insérez ce qui suit dans votre nouveau fichier, en veillant à remplacer `your_domain` par le nom de votre propre domaine : + +``` +server { +    listen 80; +    listen [::]:80; +    server_name your_domain; #Example: demo.flowiseai.com +    location / { +        proxy_pass http://localhost:3000; +        proxy_http_version 1.1; +        proxy_set_header Host $host; +        proxy_set_header Upgrade $http_upgrade; +        proxy_set_header Connection 'upgrade'; +        proxy_cache_bypass $http_upgrade; +    } +} +``` + +3. Enregistrez et quittez, avec `nano` vous pouvez le faire en appuyant sur `CTRL+O` puis `CTRL+X`. +4. 
Ensuite, activez ce fichier de configuration en créant un lien depuis celui-ci vers le répertoire sites-enabled que Nginx lit au démarrage, en vous assurant encore une fois de remplacer `your_domain` par votre propre nom de domaine : + +```bash +sudo ln -s /etc/nginx/sites-available/your_domain /etc/nginx/sites-enabled/ +``` + +5. Vous pouvez maintenant tester votre fichier de configuration pour détecter des erreurs de syntaxe : + +```bash +sudo nginx -t +``` + +6. Sans problèmes signalés, redémarrez Nginx pour appliquer vos modifications : + +```bash +sudo systemctl restart nginx +``` + +7. Allez chez votre fournisseur DNS et ajoutez un nouvel enregistrement A. Le nom sera votre nom de domaine, et la valeur sera l'adresse IPv4 publique de votre droplet. + +
+ +Nginx est maintenant configuré comme un proxy inverse pour votre serveur d'application. Vous devriez maintenant pouvoir ouvrir l'application : http://yourdomain.com. + +### Étape 3 — Installation de Certbot pour HTTPS (SSL) + +Si vous souhaitez ajouter une connexion sécurisée `https` à votre Droplet comme https://yourdomain.com, vous devrez faire ce qui suit : + +1. Pour installer Certbot et activer HTTPS sur NGINX, nous allons nous appuyer sur Python. Donc, tout d'abord, configurons un environnement virtuel : + +```bash +apt install python3.10-venv +sudo python3 -m venv /opt/certbot/ +sudo /opt/certbot/bin/pip install --upgrade pip +``` + +2. Ensuite, exécutez cette commande pour installer Certbot : + +```bash +sudo /opt/certbot/bin/pip install certbot certbot-nginx +``` + +3. Maintenant, exécutez la commande suivante pour vous assurer que la commande `certbot` peut être exécutée : + +```bash +sudo ln -s /opt/certbot/bin/certbot /usr/bin/certbot +``` + +4. Enfin, exécutez la commande suivante pour obtenir un certificat et permettre à Certbot de modifier automatiquement la configuration NGINX, activant ainsi HTTPS : + +```bash +sudo certbot --nginx +``` + +5. Après avoir suivi l'assistant de génération de certificat, nous pourrons accéder à notre Droplet via HTTPS en utilisant l'adresse https://yourdomain.com + +### Configurer le renouvellement automatique + +Pour permettre à Certbot de renouveler automatiquement les certificats, il suffit d'ajouter une tâche cron en exécutant la commande suivante : + +```bash +echo "0 0,12 * * * root /opt/certbot/bin/python -c 'import random; import time; time.sleep(random.random() * 3600)' && sudo certbot renew -q" | sudo tee -a /etc/crontab > /dev/null +``` + +## Félicitations ! + +Vous avez réussi à configurer Flowise sur votre Droplet, avec un certificat SSL sur votre domaine [🥳](https://emojipedia.org/partying-face/) + +## Étapes pour mettre à jour Flowise sur Digital Ocean + +1. 
Accédez au répertoire dans lequel vous avez installé flowise + +```bash +cd Flowise/docker +``` + +2. Arrêter et supprimer l'image docker + +Remarque : Cela ne supprimera pas vos flux car la base de données est stockée dans un dossier séparé + +```bash +sudo docker compose stop +sudo docker compose rm +``` + +3. Récupérez la dernière image Flowise + +Vous pouvez consulter la dernière version publiée [ici](https://github.com/FlowiseAI/Flowise/releases) + +```bash +docker pull flowiseai/flowise +``` + +4. Démarrer le docker + +```bash +docker compose up -d +``` diff --git a/fr/configuration/deployment/gcp.md b/fr/configuration/deployment/gcp.md new file mode 100644 index 00000000..a78fad2b --- /dev/null +++ b/fr/configuration/deployment/gcp.md @@ -0,0 +1,194 @@ +--- +description: Apprenez à déployer Flowise sur GCP +--- + +# GCP + +*** + +## Prérequis + +1. Notez votre Google Cloud \[ProjectId] +2. Installez [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) +3. Installez le [Google Cloud CLI](https://cloud.google.com/sdk/docs/install-sdk) +4. Installez [Docker Desktop](https://docs.docker.com/desktop/) + +## Configuration du Cluster Kubernetes + +1. Créez un Cluster Kubernetes si vous n'en avez pas. + +

Cliquez sur `Clusters` pour en créer un.

+ +2. Nommez le Cluster, choisissez le bon emplacement des ressources, utilisez le mode `Autopilot` et conservez toutes les autres configurations par défaut. +3. Une fois le Cluster créé, cliquez sur le menu 'Connecter' dans le menu des actions. + +
+ +4. Copiez la commande et collez-la dans votre terminal, puis appuyez sur entrée pour connecter votre cluster. +5. Exécutez la commande ci-dessous et sélectionnez le bon nom de contexte, qui ressemble à `gke_[ProjectId]_[DataCenter]_[ClusterName]` + +``` +kubectl config get-contexts +``` + +6. Définir le contexte actuel + +``` +kubectl config use-context gke_[ProjectId]_[DataCenter]_[ClusterName] +``` + +## Construire et pousser l'image Docker + +Exécutez les commandes suivantes pour construire et pousser l'image Docker vers le registre de conteneurs GCP. + +1. Clonez Flowise + +``` +git clone https://github.com/FlowiseAI/Flowise.git +``` + +2. Construire le Flowise + +``` +cd Flowise +pnpm install +pnpm build +``` + +3. Mettez à jour légèrement le fichier `Dockerfile`. + +> Specify the platform of nodejs +> +> ``` +> FROM --platform=linux/amd64 node:18-alpine +> ``` +> +> Add python3, make and g++ to install +> +> ``` +> RUN apk add --no-cache python3 make g++ +> ``` + +3. Construire en tant qu'image Docker, assurez-vous que l'application Docker Desktop est en cours d'exécution + +``` +docker build -t gcr.io/[ProjectId]/flowise:dev . +``` + +4. Poussez l'image Docker vers le registre de conteneurs GCP. + +``` +docker push gcr.io/[ProjectId]/flowise:dev +``` + +## Déploiement sur GCP + +1. Créez un dossier racine `yamls` dans le projet. +2. Ajoutez le fichier `deployment.yaml` dans ce dossier. + +``` +# deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: flowise + labels: + app: flowise +spec: + selector: + matchLabels: + app: flowise + replicas: 1 + template: + metadata: + labels: + app: flowise + spec: + containers: + - name: flowise + image: gcr.io/[ProjectID]/flowise:dev + imagePullPolicy: Always + resources: + requests: + cpu: "1" + memory: "1Gi" +``` + +3. Ajoutez le fichier `service.yaml` dans ce dossier. 
+ +``` +# service.yaml +apiVersion: "v1" +kind: "Service" +metadata: + name: "flowise-service" + namespace: "default" + labels: + app: "flowise" +spec: + ports: + - protocol: "TCP" + port: 80 + targetPort: 3000 + selector: + app: "flowise" + type: "LoadBalancer" + +``` + +It ressemblera à ce qui suit. + +
+ +4. Déployez les fichiers yaml en exécutant les commandes suivantes. + +``` +kubectl apply -f yamls/deployment.yaml +kubectl apply -f yamls/service.yaml +``` + +5. Allez dans `Workloads` dans le GCP, vous pouvez voir que votre pod est en cours d'exécution. + +
+ +6. Allez dans `Services & Ingress`, vous pouvez cliquer sur le `Endpoint` où Flowise est hébergé. + +
+ +## Félicitations ! + +Vous avez réussi à héberger les applications Flowise sur GCP [🥳](https://emojipedia.org/partying-face/) + +## Délai d'attente + +Par défaut, un délai d'attente de 30 secondes est attribué au proxy par GCP. Cela a causé des problèmes lorsque la réponse prenait plus de 30 secondes pour revenir. Pour résoudre ce problème, apportez les modifications suivantes aux fichiers YAML : + +Remarque : Pour définir le délai d'attente à 10 minutes (par exemple) -- nous spécifions 600 secondes ci-dessous. + +1. Créez un fichier `backendconfig.yaml` avec le contenu suivant : + +```yaml +apiVersion: cloud.google.com/v1 +kind: BackendConfig +metadata: + name: flowise-backendconfig + namespace: your-namespace +spec: + timeoutSec: 600 +``` + +2. Problème : `kubectl apply -f backendconfig.yaml` +3. Mettez à jour votre fichier `service.yaml` avec la référence suivante au `BackendConfig` : + +```yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + cloud.google.com/backend-config: '{"default": "flowise-backendconfig"}' + name: flowise-service + namespace: your-namespace +... +``` + +4. Problème : `kubectl apply -f service.yaml` diff --git a/fr/configuration/deployment/hugging-face.md b/fr/configuration/deployment/hugging-face.md new file mode 100644 index 00000000..e82f1e54 --- /dev/null +++ b/fr/configuration/deployment/hugging-face.md @@ -0,0 +1,63 @@ +--- +description: Apprenez à déployer Flowise sur Hugging Face +--- + +# Hugging Face + +*** + +### Créer un nouvel espace + +1. Connectez-vous à [Hugging Face](https://huggingface.co/login) +2. Commencez à créer un [nouvel espace](https://huggingface.co/new-space) avec le nom de votre choix. +3. Sélectionnez **Docker** comme **SDK de l'espace** et choisissez **Vide** comme modèle Docker. +4. Sélectionnez **CPU de base ∙ 2 vCPU ∙ 16 Go ∙ GRATUIT** comme **matériel de l'espace**. +5. Cliquez sur **Créer l'espace**. + +### Définir les variables d'environnement + +1. 
Allez dans **Paramètres** de votre nouvel espace et trouvez la section **Variables et Secrets** +2. Cliquez sur **Nouvelle variable** et ajoutez le nom `PORT` avec la valeur `7860` +3. Cliquez sur **Sauvegarder** +4. _(Optionnel)_ Cliquez sur **Nouveau secret** +5. _(Optionnel)_ Remplissez avec vos variables d'environnement, telles que les identifiants de base de données, les chemins de fichiers, etc. Vous pouvez vérifier les champs valides dans le fichier `.env.example` [ici](https://github.com/FlowiseAI/Flowise/blob/main/docker/.env.example) + +### Créer un Dockerfile + +1. Dans l'onglet des fichiers, cliquez sur le bouton _**+ Ajouter un fichier**_ et cliquez sur **Créer un nouveau fichier** (ou téléchargez des fichiers si vous préférez) +2. Créez un fichier appelé **Dockerfile** et collez ce qui suit : + +```Dockerfile +FROM node:18-alpine +USER root + +# Arguments that can be passed at build time +ARG FLOWISE_PATH=/usr/local/lib/node_modules/flowise +ARG BASE_PATH=/root/.flowise +ARG DATABASE_PATH=$BASE_PATH +ARG SECRETKEY_PATH=$BASE_PATH +ARG LOG_PATH=$BASE_PATH/logs +ARG BLOB_STORAGE_PATH=$BASE_PATH/storage + +# Install dependencies +RUN apk add --no-cache git python3 py3-pip make g++ build-base cairo-dev pango-dev chromium + +ENV PUPPETEER_SKIP_DOWNLOAD=true +ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser + +# Install Flowise globally +RUN npm install -g flowise + +# Configure Flowise directories using the ARG +RUN mkdir -p $LOG_PATH $FLOWISE_PATH/uploads && chmod -R 777 $LOG_PATH $FLOWISE_PATH + +WORKDIR /data + +CMD ["npx", "flowise", "start"] +``` + +3. Cliquez sur **Valider le fichier dans `main`** et cela commencera à construire votre application. + +### Terminé 🎉 + +Lorsque la construction est terminée, vous pouvez cliquer sur l'onglet **Application** pour voir votre application en cours d'exécution. 
diff --git a/fr/configuration/deployment/railway.md b/fr/configuration/deployment/railway.md new file mode 100644 index 00000000..982e875f --- /dev/null +++ b/fr/configuration/deployment/railway.md @@ -0,0 +1,49 @@ +--- +description: Apprenez à déployer Flowise sur Railway +--- + +# Railway + +*** + +1. Cliquez sur le [modèle](https://railway.app/template/pn4G8S?referralCode=WVNPD9) préconstruit suivant +2. Cliquez sur Déployer maintenant + +
+ +3. Changez le nom du dépôt selon votre préférence et cliquez sur Déployer + +
+ +4. Si cela réussit, vous devriez voir une URL déployée + +
+ +5. Pour ajouter une autorisation, allez dans l'onglet Variables et ajoutez : + +* FLOWISE\_USERNAME +* FLOWISE\_PASSWORD + +
+ +6. Il existe une liste de variables d'environnement que vous pouvez configurer. Consultez [environment-variables.md](../environment-variables.md "mention") + +C'est tout ! Vous avez maintenant un Flowise déployé sur Railway [🎉](https://emojipedia.org/party-popper/)[🎉](https://emojipedia.org/party-popper/) + +## Volume Persistant + +Le système de fichiers par défaut pour les services fonctionnant sur Railway est éphémère. Les données de Flowise ne sont pas conservées entre les déploiements et les redémarrages. Pour résoudre ce problème, nous pouvons utiliser [Railway Volume](https://docs.railway.app/reference/volumes). + +Pour simplifier les étapes, nous avons un modèle Railway avec volume monté : [https://railway.app/template/nEGbjR](https://railway.app/template/nEGbjR) + +Il suffit de cliquer sur Déployer et de remplir les variables d'environnement comme ci-dessous : + +* DATABASE\_PATH - `/opt/railway/.flowise` +* APIKEY\_PATH - `/opt/railway/.flowise` +* LOG\_PATH - `/opt/railway/.flowise/logs` +* SECRETKEY\_PATH - `/opt/railway/.flowise` +* BLOB\_STORAGE\_PATH - `/opt/railway/.flowise/storage` + +
+ +Essayez maintenant de créer un flux et de l'enregistrer dans Flowise. Ensuite, essayez de redémarrer le service ou de redéployer, vous devriez toujours voir le flux que vous avez enregistré précédemment. \ No newline at end of file diff --git a/fr/configuration/deployment/render.md b/fr/configuration/deployment/render.md new file mode 100644 index 00000000..c8222fdb --- /dev/null +++ b/fr/configuration/deployment/render.md @@ -0,0 +1,83 @@ +--- +description: Apprenez à déployer Flowise sur Render +--- + +# Render + +*** + +1. Forkez le [dépôt officiel de Flowise](https://github.com/FlowiseAI/Flowise) +2. Visitez votre profil GitHub pour vous assurer que vous avez bien effectué un fork +3. Connectez-vous à [Render](https://dashboard.render.com) +4. Cliquez sur **Nouveau +** + +
+ +5. Sélectionnez **Service Web** + +
+ +6. Connectez votre compte GitHub +7. Sélectionnez votre dépôt Flowise forké et cliquez sur **Connecter** + +
+ +8. Remplissez votre **Nom** et **Région** préférés. +9. Sélectionnez `Docker` comme votre **Runtime** + +
+ +9. Sélectionnez une **Instance** + +
+ +10. _(Optionnel)_ Ajoutez une autorisation au niveau de l'application, cliquez sur **Avancé** et ajoutez `Variable d'environnement` + +* FLOWISE\_USERNAME +* FLOWISE\_PASSWORD + +
+ +Ajoutez `NODE_VERSION` avec la valeur `18.18.1` comme version de node pour exécuter l'instance. + +Il y a une liste de variables d'environnement que vous pouvez configurer. Consultez [environment-variables.md](../environment-variables.md "mention") + +11. Cliquez sur **Créer un service web** + +
+ +12. Accédez à l'URL déployée et c'est tout [🚀](https://emojipedia.org/rocket/)[🚀](https://emojipedia.org/rocket/) + +
+ +## Disque Persistant + +Le système de fichiers par défaut pour les services fonctionnant sur Render est éphémère. Les données de Flowise ne sont pas conservées entre les déploiements et les redémarrages. Pour résoudre ce problème, nous pouvons utiliser [Render Disk](https://render.com/docs/disks). + +1. Dans la barre latérale gauche, cliquez sur **Disques** +2. Nommez votre disque et spécifiez le **Chemin de montage** à `/opt/render/.flowise` + +
+ +3. Cliquez sur la section **Environnement**, et ajoutez ces nouvelles variables d'environnement : + +* HOST - `0.0.0.0` +* DATABASE\_PATH - `/opt/render/.flowise` +* APIKEY\_PATH - `/opt/render/.flowise` +* LOG\_PATH - `/opt/render/.flowise/logs` +* SECRETKEY\_PATH - `/opt/render/.flowise` +* BLOB\_STORAGE\_PATH - `/opt/render/.flowise/storage` + +
+ +4. Cliquez sur **Déploiement manuel** puis sélectionnez **Effacer le cache de construction et déployer** + +
+ +5. Essayez maintenant de créer un flux et de l'enregistrer dans Flowise. Ensuite, essayez de redémarrer le service ou de redéployer, vous devriez toujours pouvoir voir le flux que vous avez enregistré précédemment. + +Regardez comment déployer sur Render + +{% embed url="https://youtu.be/Fxyc6-frgrI" %} + +{% embed url="https://youtu.be/l-0NzOMeCco" %} \ No newline at end of file diff --git a/fr/configuration/deployment/replit.md b/fr/configuration/deployment/replit.md new file mode 100644 index 00000000..5402dea1 --- /dev/null +++ b/fr/configuration/deployment/replit.md @@ -0,0 +1,42 @@ +--- +description: Apprenez à déployer Flowise sur Replit +--- + +# Replit + +*** + +1. Connectez-vous à [Replit](https://replit.com/~) +2. Créez un nouveau **Repl**. Sélectionnez **Node.js** comme modèle et remplissez votre **Titre** préféré. + +
+ +3. Après la création d'un nouveau Repl, dans la barre latérale gauche, cliquez sur Secret : + +
+ +4. Créez 3 Secrets pour ignorer le téléchargement de Chromium pour les bibliothèques Puppeteer et Playwright. + +
SecretsValeur
PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD1
PUPPETEER_SKIP_DOWNLOADtrue
PUPPETEER_SKIP_CHROMIUM_DOWNLOADtrue
+ +
+ +5. Vous pouvez maintenant passer à l'onglet Shell + +
+ +6. Tapez `npm install -g flowise` dans la fenêtre du terminal Shell. Si vous rencontrez une erreur concernant une version de node incompatible, utilisez la commande suivante `yarn global add flowise --ignore-engines` + +
+ +7. Ensuite, suivez avec `npx flowise start` + +
+ +8. Vous devriez maintenant pouvoir voir Flowise sur Replit ! + +
+ +9. Vous verrez maintenant une page de connexion. Connectez-vous simplement avec le nom d'utilisateur et le mot de passe que vous avez définis. + +
\ No newline at end of file diff --git a/fr/configuration/deployment/sealos.md b/fr/configuration/deployment/sealos.md new file mode 100644 index 00000000..15ca4b5a --- /dev/null +++ b/fr/configuration/deployment/sealos.md @@ -0,0 +1,36 @@ +--- +description: Apprenez à déployer Flowise sur Sealos +--- + +# Sealos + +*** + +1. Cliquez sur le [modèle](https://template.sealos.io/deploy?templateName=flowise) préconstruit ci-dessous ou sur le bouton ci-dessous. + +[![Déployer sur Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise) + +2. Ajoutez l'autorisation + * FLOWISE\_USERNAME + * FLOWISE\_PASSWORD + +
+ +3. Cliquez sur "Déployer l'application" sur la page du modèle pour commencer le déploiement. +4. Une fois le déploiement terminé, cliquez sur "Détails" pour accéder aux détails de l'application. + +
+ +5. Attendez que le statut de l'application passe à "en cours d'exécution". Ensuite, cliquez sur le lien externe pour ouvrir l'interface Web de l'application directement via le domaine externe. + +
+ +## Volume Persistant + +Cliquez sur "Mettre à jour" en haut à droite de la page des détails de l'application, puis cliquez sur "Avancé" -> "Ajouter un volume", remplissez la valeur du "chemin de montage" : `/root/.flowise`. + +
+ +Pour terminer, cliquez sur le bouton "Déployer". + +Essayez maintenant de créer un flux et de l'enregistrer dans Flowise. Ensuite, essayez de redémarrer le service ou de redéployer, vous devriez toujours pouvoir voir le flux que vous avez enregistré précédemment. \ No newline at end of file diff --git a/fr/configuration/deployment/zeabur.md b/fr/configuration/deployment/zeabur.md new file mode 100644 index 00000000..52a47d21 --- /dev/null +++ b/fr/configuration/deployment/zeabur.md @@ -0,0 +1,42 @@ +--- +description: Apprenez à déployer Flowise sur Zeabur +--- + +# Zeabur + +*** + +{% hint style="warning" %} +Veuillez noter que le modèle suivant créé par Zeabur est obsolète (du 2024-01-24). +{% endhint %} + +1. Cliquez sur le [modèle](https://zeabur.com/templates/2JYZTR) préconstruit ci-dessous ou sur le bouton ci-dessous. + +[![Déployer sur Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/2JYZTR) + +2. Cliquez sur Déployer + +
modèle zeabur
+ +3. Sélectionnez votre région préférée et continuez + +
sélectionner la région
+ +4. Vous serez redirigé vers le tableau de bord de Zeabur et vous verrez le processus de déploiement + +
processus de déploiement
+ +5. Pour ajouter une autorisation, allez dans l'onglet Variables et ajoutez : + +* FLOWISE\_USERNAME +* FLOWISE\_PASSWORD + +
autorisation
+ +6. Il existe une liste de variables d'environnement que vous pouvez configurer. Consultez [environment-variables.md](../environment-variables.md "mention") + +C'est tout ! Vous avez maintenant déployé Flowise sur Zeabur [🎉](https://emojipedia.org/party-popper/)[🎉](https://emojipedia.org/party-popper/) + +## Volume Persistant + +Zeabur créera automatiquement un volume persistant pour vous, donc vous n'avez pas à vous en soucier. \ No newline at end of file diff --git a/fr/configuration/environment-variables.md b/fr/configuration/environment-variables.md new file mode 100644 index 00000000..c2f0f8bc --- /dev/null +++ b/fr/configuration/environment-variables.md @@ -0,0 +1,201 @@ +--- +description: Learn how to configure environment variables for Flowise +--- + +# Environment Variables + +Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Refer to [.env.example](https://github.com/FlowiseAI/Flowise/blob/main/packages/server/.env.example) file. + +
VariableDescriptionTypeDefault
PORTThe HTTP port Flowise runs onNumber3000
FLOWISE_FILE_SIZE_LIMITMaximum file size when uploadingString50mb
NUMBER_OF_PROXIESRate Limit ProxyNumber
CORS_ORIGINSThe allowed origins for all cross-origin HTTP callsString
IFRAME_ORIGINSThe allowed origins for iframe src embeddingString
SHOW_COMMUNITY_NODESDisplay nodes that are created by communityBoolean: true or false
DISABLED_NODESComma separated list of node names to disableString
 + +## For Database + +| Variable | Description | Type | Default | +| ------------------ | ---------------------------------------------------------------- | ------------------------------------------ | ------------------------ | +| DATABASE\_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` | +| DATABASE\_PATH | Location where database is saved (When DATABASE\_TYPE is sqlite) | String | `your-home-dir/.flowise` | +| DATABASE\_HOST | Host URL or IP address (When DATABASE\_TYPE is not sqlite) | String | | +| DATABASE\_PORT | Database port (When DATABASE\_TYPE is not sqlite) | String | | +| DATABASE\_USER | Database username (When DATABASE\_TYPE is not sqlite) | String | | +| DATABASE\_PASSWORD | Database password (When DATABASE\_TYPE is not sqlite) | String | | +| DATABASE\_NAME | Database name (When DATABASE\_TYPE is not sqlite) | String | | +| DATABASE\_SSL | Database SSL is required (When DATABASE\_TYPE is not sqlite) | Boolean: `true` or `false` | `false` | + +## For Storage + +Flowise stores the following files under a local path folder by default. + +* Files uploaded on [Document Loaders](../integrations/langchain/document-loaders/)/Document Store +* Image/Audio uploads from chat +* Images/Files from Assistant +* Files from [Vector Upsert API](broken-reference) + +Users can specify `STORAGE_TYPE` to use AWS S3, Google Cloud Storage, or a local path + +| Variable | Description | Type | Default | +| -------------------------------------- | -------------------------------------------------------------------------------- | --------------------------------- | -------------------------------- | +| STORAGE\_TYPE | Type of storage for uploaded files. 
default is `local` | Enum String: `s3`, `gcs`, `local` | `local` | +| BLOB\_STORAGE\_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` | +| S3\_STORAGE\_BUCKET\_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | | +| S3\_STORAGE\_ACCESS\_KEY\_ID | AWS Access Key | String | | +| S3\_STORAGE\_SECRET\_ACCESS\_KEY | AWS Secret Key | String | | +| S3\_STORAGE\_REGION | Region for S3 bucket | String | | +| S3\_ENDPOINT\_URL | Custom S3 endpoint (optional) | String | | +| S3\_FORCE\_PATH\_STYLE | Force S3 path style (optional) | Boolean | false | +| GOOGLE\_CLOUD\_STORAGE\_CREDENTIAL | Google Cloud Service Account Key | String | | +| GOOGLE\_CLOUD\_STORAGE\_PROJ\_ID | Google Cloud Project ID | String | | +| GOOGLE\_CLOUD\_STORAGE\_BUCKET\_NAME | Google Cloud Storage Bucket Name | String | | +| GOOGLE\_CLOUD\_UNIFORM\_BUCKET\_ACCESS | Type of Access | Boolean | true | + +## For Debugging and Logs + +| Variable | Description | Type | | +| ---------- | ----------------------------------- | ------------------------------------------------ | ------------------------------ | +| DEBUG | Print logs from components | Boolean | | +| LOG\_PATH | Location where log files are stored | String | `Flowise/packages/server/logs` | +| LOG\_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | + +`DEBUG`: if set to true, will print logs to terminal/console: + +
+ +`LOG_LEVEL`: Different log levels for loggers to be saved. Can be `error`, `info`, `verbose`, or `debug.` By default it is set to `info,` only `logger.info` will be saved to the log files. If you want to have complete details, set to `debug`. + +

server-requests.log.jsonl - logs every request sent to Flowise

+ +

server.log - logs general actions on Flowise

+ +

server-error.log - logs error with stack trace

 + +### Logs Streaming S3 + +When `STORAGE_TYPE` env variable is set to `s3`, logs will be automatically streamed and stored to S3. A new log file will be created hourly, enabling easier debugging. + +### Logs Streaming GCS + +When `STORAGE_TYPE` env variable is set to `gcs`, logs will be automatically streamed to Google [Cloud Logging](https://cloud.google.com/logging?hl=en). + +## For Credentials + +Flowise stores your third-party API keys as encrypted credentials using an encryption key. + +By default, a random encryption key will be generated when starting up the application and stored under a file path. This encryption key is then retrieved every time to decrypt the credentials used within a chatflow. For example, your OpenAI API key, Pinecone API key, etc. + +You can configure Flowise to use AWS Secret Manager to store the encryption key instead. + +| Variable | Description | Type | Default | +| ----------------------------- | ----------------------------------------------------- | --------------------------- | ------------------------- | +| SECRETKEY\_STORAGE\_TYPE | How to store the encryption key | Enum String: `local`, `aws` | `local` | +| SECRETKEY\_PATH | Local file path where encryption key is saved | String | `Flowise/packages/server` | +| FLOWISE\_SECRETKEY\_OVERWRITE | Encryption key to be used instead of the existing key | String | | +| SECRETKEY\_AWS\_ACCESS\_KEY | | String | | +| SECRETKEY\_AWS\_SECRET\_KEY | | String | | +| SECRETKEY\_AWS\_REGION | | String | | + +Sometimes the encryption key might be re-generated or the stored path might change; this will cause errors like - Credentials could not be decrypted. + +To avoid this, you can set your own encryption key as `FLOWISE_SECRETKEY_OVERWRITE`, so that the same encryption key will be used every time. There is no restriction on the format, you can set it as any text that you want, or the same as your `FLOWISE_PASSWORD`. + +
 + +{% hint style="info" %} +The credential API key returned from the UI is not the same length as the original API key that you have set. This is a fake prefix string that prevents network spoofing, that's why we are not returning the API key back to the UI. However, the correct API key will be retrieved and used during your interaction with the chatflow. +{% endhint %} + +## For Models + +In some cases, you might want to use a custom model on the existing Chat Model and LLM nodes, or restrict access to only certain models. + +By default, Flowise pulls the model list from [here](https://github.com/FlowiseAI/Flowise/blob/main/packages/components/models.json). However, users can create their own `models.json` file and specify the file path: + +
VariableDescriptionTypeDefault
MODEL_LIST_CONFIG_JSONLink to load list of models from your models.json config fileStringhttps://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json
 + +## For Built-In and External Dependencies + +There are certain nodes/features within Flowise that allow users to run JavaScript code. For security reasons, by default it only allows certain dependencies. It's possible to lift that restriction for built-in and external modules by setting the following environment variables: + +
VariableDescription
TOOL_FUNCTION_BUILTIN_DEPNodeJS built-in modules to be usedString
TOOL_FUNCTION_EXTERNAL_DEPExternal modules to be used String
ALLOW_BUILTIN_DEPAllow project dependencies to be used such as cheerio, typeormBoolean
+ +{% code title=".env" %} +```bash +# Allows usage of all builtin modules +TOOL_FUNCTION_BUILTIN_DEP=* + +# Allows usage of only fs +TOOL_FUNCTION_BUILTIN_DEP=fs + +# Allows usage of only crypto and fs +TOOL_FUNCTION_BUILTIN_DEP=crypto,fs + +# Allow usage of external npm modules. +TOOL_FUNCTION_EXTERNAL_DEP=cheerio,typeorm + +ALLOW_BUILTIN_DEP=true +``` +{% endcode %} + +### NodeVM Execution Error: VMError: Cannot find module + +If you are using library that is not allowed by default, you can either: + +1. Allow all project's [libraries/dependencies](https://github.com/FlowiseAI/Flowise/blob/main/packages/components/src/utils.ts#L52): `ALLOW_BUILTIN_DEP=true` +2. (Recommended) Specifically allow certain libraries/dependencies: `TOOL_FUNCTION_EXTERNAL_DEP=cheerio,typeorm` + +## Security Configuration + +
VariableDescriptionOptionsDefault
HTTP_DENY_LISTBlocks HTTP requests to specified URLs or domains in MCP serversComma-separated URLs/domains(empty)
CUSTOM_MCP_SECURITY_CHECKEnables comprehensive security validation for Custom MCP configurationstrue | falsetrue
CUSTOM_MCP_PROTOCOLSets the default protocol for Custom MCP communicationstdio | ssestdio
+ +#### `CUSTOM_MCP_SECURITY_CHECK=true` + +By default, this is enabled. When enabled, applies the following security validations: + +* **Command Allowlist**: Only permits safe commands (`node`, `npx`, `python`, `python3`, `docker`) +* **Argument Validation**: Blocks dangerous file paths, directory traversal, and executable files +* **Injection Prevention**: Prevents shell metacharacters and command chaining +* **Environment Protection**: Blocks modification of critical environment variables (PATH, LD\_LIBRARY\_PATH) + +#### `CUSTOM_MCP_PROTOCOL` + +* **`stdio`**: Direct process communication (default, requires command execution) +* **`sse`**: Server-Sent Events over HTTP (recommended for production, more secure) + +### Recommended Production Settings + +```bash +# Enable security validation (default) +CUSTOM_MCP_SECURITY_CHECK=true + +# Use SSE protocol for better security +CUSTOM_MCP_PROTOCOL=sse + +# Block dangerous domains (example) +HTTP_DENY_LIST=localhost,127.0.0.1,internal.company.com +``` + +> **Warning**: Disabling `CUSTOM_MCP_SECURITY_CHECK` allows arbitrary command execution and poses significant security risks in production environments. + +## Examples of how to set environment variables + +### NPM + +You can set all these variables when running Flowise using npx. For example: + +``` +npx flowise start --PORT=3000 --DEBUG=true +``` + +### Docker + +``` +docker run -d -p 5678:5678 flowise \ + -e DATABASE_TYPE=postgresdb \ + -e DATABASE_PORT= \ + -e DATABASE_HOST= \ + -e DATABASE_NAME= \ + -e DATABASE_USER= \ + -e DATABASE_PASSWORD= \ +``` + +### Docker Compose + +You can set all these variables in the `.env` file inside `docker` folder. Refer to [.env.example](https://github.com/FlowiseAI/Flowise/blob/main/docker/.env.example) file. 
diff --git a/fr/configuration/rate-limit.md b/fr/configuration/rate-limit.md new file mode 100644 index 00000000..cee605d2 --- /dev/null +++ b/fr/configuration/rate-limit.md @@ -0,0 +1,31 @@ +--- +description: Apprenez à gérer les demandes d'API dans Flowise +--- + +# Limite de Taux + +*** + +Lorsque vous partagez votre chatflow publiquement sans autorisation API via l'API ou le chat intégré, n'importe qui peut accéder au flux. Pour éviter le spam, vous pouvez définir la limite de taux sur votre chatflow. + +
+ +* **Limite de Messages par Durée** : Combien de messages peuvent être reçus dans une durée spécifique. Ex : 20 +* **Durée en Secondes** : La durée spécifiée. Ex : 60 +* **Message de Limite** : Quel message retourner lorsque la limite est dépassée. Ex : Quota Dépassé + +En utilisant l'exemple ci-dessus, cela signifie que seulement 20 messages peuvent être reçus en 60 secondes. La limitation de taux est suivie par adresse IP. Si vous avez déployé Flowise sur un service cloud, vous devrez définir la variable d'environnement `NUMBER_OF_PROXIES`. + +## Configuration de la Limite de Taux + +Lorsque vous hébergez Flowise sur un cloud tel qu'AWS, GCP, Azure, etc., il est probable que vous soyez derrière un proxy/équilibreur de charge. Par conséquent, la limite de taux pourrait ne pas fonctionner. Plus d'infos peuvent être trouvées [ici](https://github.com/express-rate-limit/express-rate-limit/wiki/Troubleshooting-Proxy-Issues). + +Pour résoudre le problème : + +1. **Définir la Variable d'Environnement :** Créez une variable d'environnement nommée `NUMBER_OF_PROXIES` et définissez sa valeur à `0` dans votre environnement d'hébergement. +2. **Redémarrez votre instance Flowise hébergée :** Cela permet à Flowise d'appliquer les modifications des variables d'environnement. +3. **Vérifiez l'Adresse IP :** Pour vérifier l'adresse IP, accédez à l'URL suivante : `{{hosted_url}}/api/v1/ip`. Vous pouvez le faire en entrant l'URL dans votre navigateur ou en effectuant une requête API. +4. **Comparer l'Adresse IP** Après avoir effectué la requête, comparez l'adresse IP retournée avec votre adresse IP actuelle. Vous pouvez trouver votre adresse IP actuelle en visitant l'un de ces sites : + * [http://ip.nfriedly.com/](http://ip.nfriedly.com/) + * [https://api.ipify.org/](https://api.ipify.org/) +5. **Adresse IP Incorrecte :** Si l'adresse IP retournée ne correspond pas à votre adresse IP actuelle, augmentez `NUMBER_OF_PROXIES` de 1 et redémarrez votre instance Flowise. 
Répétez ce processus jusqu'à ce que l'adresse IP corresponde à la vôtre. \ No newline at end of file diff --git a/fr/configuration/running-flowise-behind-company-proxy.md b/fr/configuration/running-flowise-behind-company-proxy.md new file mode 100644 index 00000000..d55806a0 --- /dev/null +++ b/fr/configuration/running-flowise-behind-company-proxy.md @@ -0,0 +1,19 @@ +# Exécuter Flowise derrière un proxy d'entreprise + +Si vous exécutez Flowise dans un environnement qui nécessite un proxy, comme au sein d'un réseau organisationnel, vous pouvez configurer Flowise pour acheminer toutes ses requêtes backend via un proxy de votre choix. Cette fonctionnalité est alimentée par le package `global-agent`. + +[https://github.com/gajus/global-agent](https://github.com/gajus/global-agent) + +## Configuration + +Il y a 2 variables d'environnement dont vous aurez besoin pour exécuter Flowise derrière un proxy d'entreprise : + +| Variable | Objectif | Requis | +| -------------------------- | -------------------------------------------------------------------------------- | -------- | +| `GLOBAL_AGENT_HTTP_PROXY` | Où acheminer toutes les requêtes HTTP du serveur | Oui | +| `GLOBAL_AGENT_HTTPS_PROXY` | Où acheminer toutes les requêtes HTTPS du serveur | Non | +| `GLOBAL_AGENT_NO_PROXY` | Un motif d'URLs qui doivent être exclues du proxy. Par exemple, `*.foo.com,baz.com` | Non | + +## Liste blanche des connexions sortantes + +Pour le plan entreprise, vous devez autoriser plusieurs connexions sortantes pour la vérification de licence. Veuillez contacter support@flowiseai.com pour plus d'informations. \ No newline at end of file diff --git a/fr/configuration/running-flowise-using-queue.md b/fr/configuration/running-flowise-using-queue.md new file mode 100644 index 00000000..0d8b3e8f --- /dev/null +++ b/fr/configuration/running-flowise-using-queue.md @@ -0,0 +1,201 @@ +# Exécution de Flowise en mode Queue + +Par défaut, Flowise s'exécute dans un thread principal NodeJS. 
Cependant, avec un grand nombre de prédictions, cela ne s'adapte pas bien. Il existe donc 2 modes que vous pouvez configurer : `main` (par défaut) et `queue`. + +## Mode Queue + +Avec les variables d'environnement suivantes, vous pouvez exécuter Flowise en mode `queue`. + +
VariableDescriptionTypePar défaut
MODEMode d'exécution de FlowiseEnum String : main, queuemain
WORKER_CONCURRENCYNombre de tâches autorisées à être traitées en parallèle pour un travailleur. Si vous avez 1 travailleur, cela signifie combien de tâches de prédiction simultanées il peut gérer. Plus d'infosNombre10000
QUEUE_NAMELe nom de la file de messagesStringflowise-queue
QUEUE_REDIS_EVENT_STREAM_MAX_LENLe flux d'événements est automatiquement réduit afin que sa taille ne croisse pas trop. Plus d'infosNombre10000
REDIS_URLURL RedisString
REDIS_HOSTHôte RedisStringlocalhost
REDIS_PORTPort RedisNombre6379
REDIS_USERNAMENom d'utilisateur Redis (optionnel)String
REDIS_PASSWORDMot de passe Redis (optionnel)String
REDIS_TLSConnexion TLS Redis (optionnel) Plus d'infosBooleanfalse
REDIS_CERTCertificat auto-signé RedisString
REDIS_KEYFichier clé du certificat auto-signé RedisString
REDIS_CAFichier CA du certificat auto-signé RedisString
+ +En mode `queue`, le serveur principal sera responsable du traitement des demandes, en envoyant des tâches à la file de messages. Le serveur principal n'exécutera pas la tâche. Un ou plusieurs travailleurs reçoivent des tâches de la file, les exécutent et renvoient les résultats. + +Cela permet une mise à l'échelle dynamique : vous pouvez ajouter des travailleurs pour gérer des charges de travail accrues ou les retirer pendant les périodes plus légères. + +Voici comment cela fonctionne : + +1. Le serveur principal reçoit des demandes de prédiction ou d'autres demandes du web, les ajoutant comme tâches à la file. +2. Ces files de tâches sont des listes essentielles de tâches en attente d'être traitées. Les travailleurs, qui sont essentiellement des processus ou des threads séparés, prennent ces tâches et les exécutent. +3. Une fois la tâche terminée, le travailleur : + * Écrit les résultats dans la base de données. + * Envoie un événement pour indiquer l'achèvement de la tâche. +4. Le serveur principal reçoit l'événement et renvoie le résultat à l'interface utilisateur. +5. Redis pub/sub est également utilisé pour diffuser des données vers l'interface utilisateur. + +
+ +## Diagramme de Flux + +
+ +#### 1. Point d'Entrée de la Demande + +Une demande de prédiction atteint le serveur Express et vérifie immédiatement si `MODE=QUEUE`. Si c'est vrai, il passe de l'exécution directe au traitement asynchrone en file d'attente. + +#### 2. Création de Tâche & Canaux Doubles + +Le système crée deux chemins parallèles : + +* **Canal de Tâche** : Les données de la demande deviennent une tâche Redis via BullMQ, le thread HTTP attend l'achèvement +* **Canal de Flux** : Connexion SSE établie pour des mises à jour en temps réel via la publication/souscription Redis + +#### 3. Traitement par le Travailleur + +Des processus de travailleurs indépendants interrogent Redis pour des tâches. Lorsqu'une tâche est assignée : + +* Reconstruire le contexte d'exécution complet (DB, composants, contrôleurs d'abandon) +* Exécuter le flux de travail avec un traitement nœud par nœud +* Publier des événements en temps réel (tokens, outils, progrès) sur les canaux Redis + +#### 4. Communication en Temps Réel + +Pendant l'exécution : + +* [**RedisEventPublisher**](https://github.com/FlowiseAI/Flowise/blob/main/packages/server/src/queue/RedisEventPublisher.ts) diffuse des événements du travailleur vers Redis +* [**RedisEventSubscriber**](https://github.com/FlowiseAI/Flowise/blob/main/packages/server/src/queue/RedisEventSubscriber.ts) transmet des événements de Redis aux clients SSE +* [**SSEStreamer**](https://github.com/FlowiseAI/Flowise/blob/main/packages/server/src/utils/SSEStreamer.ts) livre des événements au navigateur en temps réel + +#### 5. Achèvement & Réponse + +La tâche se termine, le résultat est stocké dans Redis : + +* Le thread HTTP se débloque, reçoit le résultat +* La connexion SSE se ferme proprement +* Les ressources sont nettoyées (contrôleurs d'abandon, connexions) + +## Configuration Locale + +### Démarrer Redis + +Avant de démarrer le serveur principal et les travailleurs, Redis doit d'abord être en cours d'exécution. 
Vous pouvez exécuter Redis sur une machine séparée, mais assurez-vous qu'il est accessible par les instances de serveur et de travailleurs. + +Par exemple, vous pouvez faire fonctionner Redis sur votre Docker en suivant ce [guide](https://www.docker.com/blog/how-to-use-the-redis-docker-official-image/). + +### Démarrer le Serveur Principal + +C'est la même procédure que celle que vous suivriez pour exécuter Flowise par défaut, à l'exception de la configuration des variables d'environnement mentionnées ci-dessus. + +```bash +pnpm start +``` + +### Démarrer le Travailleur + +Tout comme pour le serveur principal, les variables d'environnement ci-dessus doivent être configurées. Nous recommandons d'utiliser le même fichier `.env` pour les instances principales et de travail. La seule différence réside dans la façon de lancer les travailleurs. Ouvrez un autre terminal et exécutez : + +```bash +pnpm run start-worker +``` + +{% hint style="warning" %} +Le serveur principal et le worker doivent partager la même clé secrète. Référez-vous à [#for-credentials](environment-variables.md#for-credentials "mention"). Pour la production, nous recommandons d'utiliser Postgres comme base de données pour des performances optimales. +{% endhint %} + +## Configuration de Docker + +### Méthode 1 : Images préconstruites (Recommandée) + +Cette méthode utilise des images Docker préconstruites depuis Docker Hub, ce qui en fait l'option de déploiement la plus rapide et la plus fiable. 
+ +**Étape 1 : Configurer l'environnement** + +Créez un fichier `.env` dans le répertoire `docker` : + +```bash +# Basic Configuration +PORT=3000 +WORKER_PORT=5566 + +# Queue Configuration (Required) +MODE=queue +QUEUE_NAME=flowise-queue +REDIS_URL=redis://redis:6379 + +# Optional Queue Settings +WORKER_CONCURRENCY=5 +REMOVE_ON_AGE=24 +REMOVE_ON_COUNT=1000 +QUEUE_REDIS_EVENT_STREAM_MAX_LEN=1000 +ENABLE_BULLMQ_DASHBOARD=false + +# Database (Optional - defaults to SQLite) +DATABASE_PATH=/root/.flowise + +# Storage +BLOB_STORAGE_PATH=/root/.flowise/storage + +# Secret Keys +SECRETKEY_PATH=/root/.flowise + +# Logging +LOG_PATH=/root/.flowise/logs +``` + +**Étape 2 : Déployer** + +```bash +cd docker +docker compose -f docker-compose-queue-prebuilt.yml up -d +``` + +**Étape 3 : Vérifier le déploiement** + +```bash +# Check container status +docker compose -f docker-compose-queue-prebuilt.yml ps + +# View logs +docker compose -f docker-compose-queue-prebuilt.yml logs -f flowise +docker compose -f docker-compose-queue-prebuilt.yml logs -f flowise-worker +``` + +### Méthode 2 : Construire à partir du code source + +Cette méthode construit Flowise à partir du code source, utile pour le développement ou les modifications personnalisées. + +**Étape 1 : Configurer l'environnement** + +Créez le même fichier `.env` que dans [Méthode 1](running-flowise-using-queue.md#method-1-pre-built-images-recommended). 
+ +**Étape 2 : Déployer** + +```bash +cd docker +docker compose -f docker-compose-queue-source.yml up -d +``` + +**Étape 3 : Processus de construction** + +La construction source va : + +* Construire l'application principale Flowise à partir du code source +* Construire l'image du worker à partir du code source +* Configurer Redis et le réseau + +**Étape 4 : Surveiller la construction** + +```bash +# Watch build progress +docker compose -f docker-compose-queue-source.yml logs -f + +# Check final status +docker compose -f docker-compose-queue-source.yml ps +``` + +### Vérifications de santé + +Tous les fichiers compose incluent des vérifications de santé : + +```bash +# Check main instance health +curl http://localhost:3000/api/v1/ping + +# Check worker health +curl http://localhost:5566/healthz +``` + +## Tableau de bord des files d'attente + +Définir `ENABLE_BULLMQ_DASHBOARD` sur true permettra aux utilisateurs de voir tous les travaux, l'état, le résultat et les données en naviguant vers `/admin/queues` + +
diff --git a/fr/configuration/running-in-production.md b/fr/configuration/running-in-production.md new file mode 100644 index 00000000..8c06201a --- /dev/null +++ b/fr/configuration/running-in-production.md @@ -0,0 +1,30 @@ +# Exécution en Production + +## Mode + +Lorsque vous exécutez en production, nous vous recommandons vivement d'utiliser le mode [Queue](running-flowise-using-queue.md) avec les paramètres suivants : + +* 2 serveurs principaux avec équilibrage de charge, chacun commençant avec 4vCPU 8 Go de RAM +* 4 travailleurs, chacun commençant avec 4vCPU 8 Go de RAM + +Vous pouvez configurer l'auto-scaling en fonction du trafic et du volume. + +## Base de données + +Par défaut, Flowise utilisera SQLite comme base de données. Cependant, lorsqu'il est exécuté à grande échelle, il est recommandé d'utiliser PostgresQL. + +## Stockage + +Actuellement, Flowise ne prend en charge que [AWS S3](https://aws.amazon.com/s3/) avec un plan pour prendre en charge d'autres fournisseurs de stockage d'objets. Cela permettra de stocker des fichiers et des journaux sur S3, au lieu d'un chemin de fichier local. Consultez [#for-storage](environment-variables.md#for-storage "mention") + +## Chiffrement + +Flowise utilise une clé de chiffrement pour chiffrer/déchiffrer les identifiants que vous utilisez, tels que les clés API OpenAI. Il est recommandé d'utiliser [AWS Secret Manager](https://aws.amazon.com/secrets-manager/) en production pour un meilleur contrôle de la sécurité et une rotation des clés. Consultez [#for-credentials](environment-variables.md#for-credentials "mention") + +## Limite de Taux + +Lorsqu'il est déployé dans le cloud/en local, il est probable que les instances soient derrière un proxy/un équilibreur de charge. L'adresse IP de la requête pourrait être celle de l'équilibreur de charge/proxy inverse, rendant le limiteur de taux effectivement global et bloquant toutes les requêtes une fois la limite atteinte ou `undefined`. 
Définir le bon `NUMBER_OF_PROXIES` peut résoudre le problème. Consultez [#rate-limit-setup](rate-limit.md#rate-limit-setup "mention") + +## Test de Charge + +Artillery peut être utilisé pour tester la charge de votre application Flowise déployée. Un exemple de script peut être trouvé [ici](https://github.com/FlowiseAI/Flowise/blob/main/artillery-load-test.yml). \ No newline at end of file diff --git a/fr/configuration/sso.md b/fr/configuration/sso.md new file mode 100644 index 00000000..01d8238b --- /dev/null +++ b/fr/configuration/sso.md @@ -0,0 +1,126 @@ +# SSO + +{% hint style="info" %} +Le SSO est uniquement disponible pour le plan Entreprise +{% endhint %} + +Flowise prend en charge [OIDC](https://openid.net/) qui permet aux utilisateurs d'utiliser _l'authentification unique_ (_SSO_) pour accéder à l'application. Actuellement, seul l'[Administrateur de l'organisation](../using-flowise/workspaces.md#setting-up-admin-account) peut configurer les paramètres SSO. + +## Microsoft + +1. Dans le portail Azure, recherchez Microsoft Entra ID : + +
+ +2. Dans la barre latérale gauche, cliquez sur Enregistrements d'applications, puis sur Nouvelle inscription : + +
+ +3. Entrez un nom d'application et sélectionnez Locataire unique : + +
+ +4. Après la création de l'application, notez l'ID d'application (client) et l'ID de répertoire (locataire) : + +
+ +5. Dans la barre latérale gauche, cliquez sur Certificats et secrets -> Nouveau secret client -> Ajouter : + +
+ +6. Après la création du secret, copiez la Valeur, pas l'ID de secret : + +
+ +7. Dans la barre latérale gauche, cliquez sur Authentification -> Ajouter une plateforme -> Web : + +
+ +8. Remplissez les URI de redirection. Cela devra être modifié en fonction de la façon dont vous l'hébergez : `http[s]://[votre-instance-flowise.com]/api/v1/azure/callback` : + +
+ +9. Vous devriez voir la nouvelle URI de redirection créée : + +
+ +10. Retournez à l'application Flowise, connectez-vous en tant qu'Administrateur de l'organisation. Accédez à la configuration SSO depuis la barre latérale gauche. Remplissez l'ID de locataire Azure et l'ID client de l'étape 4, et le secret client de l'étape 6. Cliquez sur Tester la configuration pour voir si la connexion peut être établie avec succès : + +
+ +11. Enfin, activez et enregistrez : + +
+ +12. Avant que les utilisateurs puissent se connecter en utilisant le SSO, ils doivent d'abord être invités. Consultez [Inviter des utilisateurs pour la connexion SSO](sso.md#inviting-users-for-sso-sign-in) pour un guide étape par étape. Les utilisateurs invités doivent également faire partie des utilisateurs du répertoire dans Azure. + +
+ +## Google + +Pour activer la connexion avec Google sur votre site web, vous devez d'abord configurer votre ID client API Google. Pour ce faire, suivez les étapes suivantes : + +1. Ouvrez la page **Identifiants** de la [console API Google](https://console.developers.google.com/apis). +2. Cliquez sur **Créer des identifiants** > **ID client OAuth** + +
+ +3\. Sélectionnez **Application Web** : + +
+ +4\. Remplissez les URI de redirection. Cela devra être modifié en fonction de la façon dont vous l'hébergez : `http[s]://[votre-instance-flowise.com]/api/v1/google/callback` : + +
+ +5\. Après la création, récupérez l'ID client et le secret : + +
+ +6\. Retournez à l'application Flowise, ajoutez l'ID client et le secret. Testez la connexion et enregistrez-le. + +
+ +## Auth0 + +1. Inscrivez-vous sur [Auth0](https://auth0.com/), puis créez une nouvelle application + +
+ +2. Sélectionnez **Application Web régulière** : + +
+ +3. Configurez les champs tels que Nom, Description. Prenez note du **Domaine**, de l'**ID client** et du **Secret client**. + +
+ +4\. Remplissez les URI de l'application. Cela devra être modifié en fonction de la façon dont vous l'hébergez : `http[s]://[votre-instance-flowise.com]/api/v1/auth0/callback` : + +
+ +5. Dans l'onglet API, assurez-vous que l'API de gestion Auth0 est activée avec les permissions suivantes + * read:users + * read:client\_grants + +
+ +6\. Retournez à l'application Flowise, remplissez le Domaine, l'ID client et le Secret. Testez et enregistrez la configuration. + +
+ +## Inviter des utilisateurs pour la connexion SSO + +Pour qu'un nouvel utilisateur puisse se connecter, vous devez inviter de nouveaux utilisateurs dans l'application Flowise. Cela est essentiel pour garder une trace du rôle / espace de travail de l'utilisateur invité. Consultez la section [Inviter des utilisateurs](../using-flowise/workspaces.md#invite-user) pour la configuration des variables d'environnement. + +L'utilisateur invité recevra un lien d'invitation pour se connecter : + +
+ +En cliquant sur le bouton, l'utilisateur invité sera directement dirigé vers l'écran de connexion SSO de Flowise : + +
+ +Ou naviguez vers l'application Flowise et connectez-vous avec SSO : + +
\ No newline at end of file diff --git a/fr/contributing/README.md b/fr/contributing/README.md new file mode 100644 index 00000000..bd087030 --- /dev/null +++ b/fr/contributing/README.md @@ -0,0 +1,59 @@ +--- +description: Apprenez à contribuer à ce projet +--- + +# Guide de Contribution + +*** + +Nous apprécions toutes les contributions ! Peu importe votre niveau de compétence ou votre parcours technique, vous pouvez aider ce projet à grandir. Voici quelques façons de contribuer : + +## ⭐ Étoile + +Étoile et partagez le [Github Repo](https://github.com/FlowiseAI/Flowise). + +## 🙌 Partager Chatflow + +Oui ! Partager comment vous utilisez Flowise est une façon de contribuer. Exportez votre chatflow au format JSON, joignez une capture d'écran et partagez-le dans la [section Show and Tell](https://github.com/FlowiseAI/Flowise/discussions/categories/show-and-tell). + +## 💡 Idées + +Nous accueillons les idées pour de nouvelles fonctionnalités et intégrations d'applications. Soumettez vos suggestions dans la [section Idées](https://github.com/FlowiseAI/Flowise/discussions/categories/ideas). + +## 🙋 Q\&A + +Vous voulez en savoir plus ? Recherchez des réponses à vos questions dans la [section Q\&A](https://github.com/FlowiseAI/Flowise/discussions/categories/q-a). Si vous ne trouvez pas de réponse, n'hésitez pas à créer une nouvelle question. Cela pourrait aider d'autres personnes ayant des questions similaires. + +## 🐞 Signaler des Bugs + +Vous avez trouvé un problème ? [Signalez-le](https://github.com/FlowiseAI/Flowise/issues/new/choose). + +## 📖 Contribuer à la Documentation + +1. Forkez le [Flowise Docs Repo](https://github.com/FlowiseAI/FlowiseDocs) officiel +2. Clonez votre dépôt forké +3. Créez une nouvelle branche +4. Passez à la branche que vous venez de créer +5. Allez dans le dossier du dépôt + + ```bash + cd FlowiseDocs + ``` +6. Apportez des modifications +7. 
Validez les modifications et soumettez une Pull Request depuis la branche forkée pointant vers [FlowiseDocs main](https://github.com/FlowiseAI/FlowiseDocs) + +## 👨‍💻 Contribuer au Code + +Pour apprendre à contribuer du code, rendez-vous dans la section [Pour les Développeurs](../getting-started/#setup-2) et suivez les instructions. + +Si vous contribuez à une nouvelle intégration de nœud, lisez le guide [Building Node](building-node.md). + +## 🏷️ Processus de Pull Request + +Un membre de l'équipe FlowiseAI sera automatiquement notifié/assigné lorsque vous ouvrirez une pull request. Vous pouvez également nous contacter sur [Discord](https://discord.gg/jbaHfsRVBW). + +## 📜 Code de Conduite + +Ce projet et toutes les personnes qui y participent sont régis par le Code de Conduite que vous pouvez trouver dans le [fichier](https://github.com/FlowiseAI/Flowise/blob/main/CODE\_OF\_CONDUCT.md). En participant, vous êtes censé respecter ce code. + +Veuillez signaler tout comportement inacceptable à hello@flowiseai.com. \ No newline at end of file diff --git a/fr/contributing/building-node.md b/fr/contributing/building-node.md new file mode 100644 index 00000000..683a67a7 --- /dev/null +++ b/fr/contributing/building-node.md @@ -0,0 +1,125 @@ +# Construction de Node + +### Installer Git + +Tout d'abord, installez Git et clonez le dépôt Flowise. Vous pouvez suivre les étapes du guide [Commencer](broken-reference). + +### Structure + +Flowise sépare chaque intégration de node dans le dossier `packages/components/nodes`. Essayons de créer un outil simple ! + +### Créer l'outil Calculatrice + +Créez un nouveau dossier nommé `Calculator` dans le dossier `packages/components/nodes/tools`. Ensuite, créez un nouveau fichier nommé `Calculator.ts`. À l'intérieur du fichier, nous allons d'abord écrire la classe de base. 
```javascript
import { INode } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'

class Calculator_Tools implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    author: string
    baseClasses: string[]

    constructor() {
        this.label = 'Calculator'
        this.name = 'calculator'
        this.version = 1.0
        this.type = 'Calculator'
        this.icon = 'calculator.svg'
        this.category = 'Tools'
        this.author = 'Your Name'
        this.description = 'Perform calculations on response'
        this.baseClasses = [this.type, ...getBaseClasses(Calculator)]
    }
}

module.exports = { nodeClass: Calculator_Tools }
```

Chaque nœud implémente la classe de base `INode`. Voici la signification de chaque propriété :

<table data-full-width="true"><thead><tr><th>
PropriétéDescription
labelLe nom du nœud qui apparaît dans l'interface utilisateur
nameLe nom utilisé par le code. Doit être camelCase
versionVersion du nœud
typeGénéralement identique au label. Pour définir quel nœud peut être connecté à ce type spécifique dans l'interface utilisateur
iconIcône du nœud
categoryCatégorie du nœud
authorCréateur du nœud
descriptionDescription du nœud
baseClassesLes classes de base du nœud, puisque un nœud peut s'étendre à partir d'un composant de base. Utilisé pour définir quel nœud peut être connecté à ce nœud dans l'interface utilisateur
+ +### Définir la classe + +Maintenant que la classe de composant est partiellement terminée, nous pouvons procéder à la définition de la classe Tool réelle, dans ce cas - `Calculator`. + +Créez un nouveau fichier dans le même dossier `Calculator`, et nommez-le `core.ts` + +```javascript +import { Parser } from "expr-eval" +import { Tool } from "@langchain/core/tools" + +export class Calculator extends Tool { + name = "calculator" + description = `Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator.` + + async _call(input: string) { + try { + return Parser.evaluate(input).toString() + } catch (error) { + return "I don't know how to do that." + } + } +} +``` + +### Finishing + +Retournez au fichier `Calculator.ts`, nous pouvons terminer cela en ajoutant la fonction `async init`. Dans cette fonction, nous allons initialiser la classe Calculator que nous avons créée ci-dessus. Lorsque le flux est exécuté, la fonction `init` dans chaque nœud sera appelée, et la fonction `_call` sera exécutée lorsque le LLM décidera d'appeler cet outil. 
+ +```javascript +import { INode } from '../../../src/Interface' +import { getBaseClasses } from '../../../src/utils' +import { Calculator } from './core' + +class Calculator_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + author: string + baseClasses: string[] + + constructor() { + this.label = 'Calculator' + this.name = 'calculator' + this.version = 1.0 + this.type = 'Calculator' + this.icon = 'calculator.svg' + this.category = 'Tools' + this.author = 'Your Name' + this.description = 'Perform calculations on response' + this.baseClasses = [this.type, ...getBaseClasses(Calculator)] + } + + + async init() { + return new Calculator() + } +} + +module.exports = { nodeClass: Calculator_Tools } +``` + +### Construire et Exécuter + +Dans le fichier `.env` à l'intérieur de `packages/server`, créez une nouvelle variable d'environnement : + +```javascript +SHOW_COMMUNITY_NODES=true +``` + +Maintenant, nous pouvons utiliser `pnpm build` et `pnpm start` pour donner vie au composant ! + +
diff --git a/fr/getting-started/README.md b/fr/getting-started/README.md new file mode 100644 index 00000000..fddc2d60 --- /dev/null +++ b/fr/getting-started/README.md @@ -0,0 +1,216 @@ +# Commencer + +*** + +## Cloud + +L'auto-hébergement nécessite plus de compétences techniques pour configurer l'instance, sauvegarder la base de données et maintenir les mises à jour. Si vous n'êtes pas expérimenté dans la gestion des serveurs et que vous souhaitez simplement utiliser l'application web, nous vous recommandons d'utiliser [Flowise Cloud](https://cloud.flowiseai.com). + +## Démarrage rapide + +{% hint style="info" %} +Prérequis : assurez-vous que [NodeJS](https://nodejs.org/en/download) est installé sur la machine. Node `v18.15.0` ou `v20` et supérieur est pris en charge. +{% endhint %} + +Installez Flowise localement en utilisant NPM. + +1. Installez Flowise: + +```bash +npm install -g flowise +``` + +Vous pouvez également installer une version spécifique. Consultez les [versions](https://www.npmjs.com/package/flowise?activeTab=versions) disponibles. + +``` +npm install -g flowise@x.x.x +``` + +2. Démarrer Flowise : + +```bash +npx flowise start +``` + +3. Ouvrez : [http://localhost:3000](http://localhost:3000) + +*** + +## Docker + +Il existe deux façons de déployer Flowise avec Docker. Tout d'abord, clonez le projet : [https://github.com/FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) + +### Docker Compose + +1. Allez dans le dossier `docker` à la racine du projet +2. Copiez le fichier `.env.example` et collez-le sous un autre nom de fichier `.env` +3. Exécutez : + +```bash +docker compose up -d +``` + +4. Ouvrez : [http://localhost:3000](http://localhost:3000) +5. Vous pouvez arrêter les conteneurs en exécutant : + +```bash +docker compose stop +``` + +### Image Docker + +1. Construisez l'image : + +```bash +docker build --no-cache -t flowise . +``` + +2. Exécuter l'image : + +```bash +docker run -d --name flowise -p 3000:3000 flowise +``` + +3. 
Arrêter l'image : + +```bash +docker stop flowise +``` + +*** + +## Pour les développeurs + +Flowise dispose de 4 modules différents dans un seul dépôt mono : + +* **Serveur** : backend Node pour gérer la logique de l'API +* **UI** : frontend React +* **Composants** : composants d'intégration +* **Documentation de l'API** : spécification Swagger pour les APIs Flowise + +### Prérequis + +Installez [PNPM](https://pnpm.io/installation). + +```bash +npm i -g pnpm +``` + +### Configuration 1 + +Configuration simple utilisant PNPM : + +1. Clonez le dépôt + +```bash +git clone https://github.com/FlowiseAI/Flowise.git +``` + +2. Accédez au dossier du dépôt + +```bash +cd Flowise +``` + +3. Installez toutes les dépendances de tous les modules : + +```bash +pnpm install +``` + +4. Construire le code : + +```bash +pnpm build +``` + +Démarrez l'application à [http://localhost:3000](http://localhost:3000) + +```bash +pnpm start +``` + +### Configuration 2 + +Instructions étape par étape pour les contributeurs du projet : + +1. Forkez le [dépôt Github officiel de Flowise](https://github.com/FlowiseAI/Flowise) +2. Clonez votre dépôt forké +3. Créez une nouvelle branche, consultez le [guide](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository). Conventions de nommage : + * Pour la branche de fonctionnalité : `feature/` + * Pour la branche de correction de bogue : `bugfix/`. +4. Passez à la branche que vous venez de créer +5. Accédez au dossier du dépôt : + +```bash +cd Flowise +``` + +6. Installez toutes les dépendances de tous les modules : + +```bash +pnpm install +``` + +7. Construire le code : + +```bash +pnpm build +``` + +8. Démarrez l'application à [http://localhost:3000](http://localhost:3000) + +```bash +pnpm start +``` + +9. 
Pour la construction de développement : + +* Créez un fichier `.env` et spécifiez le `PORT` (voir `.env.example`) dans `packages/ui` +* Créez un fichier `.env` et spécifiez le `PORT` (voir `.env.example`) dans `packages/server` + +```bash +pnpm dev +``` + +* Toute modification apportée dans `packages/ui` ou `packages/server` sera reflétée sur [http://localhost:8080](http://localhost:8080/) +* Pour les modifications apportées dans `packages/components`, vous devrez reconstruire pour prendre en compte les changements +* Après avoir effectué toutes les modifications, exécutez : + + ```bash + pnpm build + ``` + + et + + ```bash + pnpm start + ``` + + pour vous assurer que tout fonctionne correctement en production. + +*** + +## Pour les entreprises + +Avant de démarrer l'application, les utilisateurs d'entreprise doivent remplir les valeurs des paramètres d'entreprise dans le fichier `.env`. Consultez `.env.example` pour les modifications requises. + +Contactez support@flowiseai.com pour la valeur des variables d'environnement suivantes : + +``` +LICENSE_URL +FLOWISE_EE_LICENSE_KEY +``` + +*** + +## En savoir plus + +Dans ce tutoriel vidéo, Leon présente une introduction à Flowise et explique comment l'installer sur votre machine locale. 
+ +{% embed url="https://youtu.be/nqAK_L66sIQ" %} + +## Guide de la communauté + +* [Introduction à la construction d'applications LLM avec Flowise / LangChain \[Pratique\]](https://volcano-ice-cd6.notion.site/Introduction-to-Practical-Building-LLM-Applications-with-Flowise-LangChain-03d6d75bfd20495d96dfdae964bea5a5) +* [Introduction à la construction d'applications LLM avec Flowise / LangChain \[Pratique\]](https://volcano-ice-cd6.notion.site/Flowise-LangChain-LLM-e106bb0f7e2241379aad8fa428ee064a) diff --git a/fr/integrations/3rd-party-platform-integration/README.md b/fr/integrations/3rd-party-platform-integration/README.md new file mode 100644 index 00000000..6a70d94d --- /dev/null +++ b/fr/integrations/3rd-party-platform-integration/README.md @@ -0,0 +1,11 @@ +--- +description: Apprenez à intégrer Flowise avec des plateformes tierces +--- + +# Intégrations externes + +*** + +Flowise peut également être utilisé sur des plateformes tierces. Voici quelques exemples d'utilisation : + +* [Zapier Zaps](zapier-zaps.md) \ No newline at end of file diff --git a/fr/integrations/3rd-party-platform-integration/open-webui.md b/fr/integrations/3rd-party-platform-integration/open-webui.md new file mode 100644 index 00000000..0d22c77f --- /dev/null +++ b/fr/integrations/3rd-party-platform-integration/open-webui.md @@ -0,0 +1,213 @@ +# Open WebUI + +[Open WebUI](https://github.com/open-webui/open-webui) est une plateforme _IA auto-hébergée_ extensible, riche en fonctionnalités et conviviale, conçue pour fonctionner entièrement hors ligne. + +[Les Fonctions](https://docs.openwebui.com/features/plugin/functions/) sont comme des plugins pour Open WebUI. Nous pouvons créer une [Fonction Pipe](https://docs.openwebui.com/features/plugin/functions/pipe) personnalisée qui traite les entrées et génère des réponses en invoquant l'API de prédiction Flowise avant de retourner les résultats à l'utilisateur. Grâce à cela, Flowise peut être utilisé dans Open WebUI. + +## Configuration + +1. 
Tout d'abord, assurez-vous qu'Open WebUI est opérationnel, vous pouvez consulter le guide [Démarrage rapide](https://docs.openwebui.com/getting-started/quick-start/). En bas à gauche, cliquez sur votre profil et **Panneau d'administration** + +
+ +2. Ouvrez l'onglet **Fonctions**, et ajoutez une nouvelle Fonction. + +
+ +3. Nommez la Fonction, et ajoutez le code suivant : + +```python +""" +title: Flowise Integration for OpenWebUI +Requirements: + - Flowise API URL (set via FLOWISE_API_URL) + - Flowise API Key (set via FLOWISE_API_KEY) +""" + +from pydantic import BaseModel, Field +from typing import Optional, Dict, Any, List, Union, Generator, Iterator +import requests +import json +import os + + +class Pipe: + class Valves(BaseModel): + flowise_url: str = Field( + default=os.getenv("FLOWISE_API_URL", ""), + description="Flowise URL", + ) + flowise_api_key: str = Field( + default=os.getenv("FLOWISE_API_KEY", ""), + description="Flowise API key for authentication", + ) + + def __init__(self): + self.type = "manifold" + self.id = "flowise_chat" + self.valves = self.Valves() + + # Validate required settings + if not self.valves.flowise_url: + print( + "⚠️ Please set your Flowise URL using the FLOWISE_API_URL environment variable" + ) + if not self.valves.flowise_api_key: + print( + "⚠️ Please set your Flowise API key using the FLOWISE_API_KEY environment variable" + ) + + def pipes(self): + if self.valves.flowise_api_key and self.valves.flowise_url: + try: + headers = { + "Authorization": f"Bearer {self.valves.flowise_api_key}", + "Content-Type": "application/json", + } + + r = requests.get( + f"{self.valves.flowise_url}/api/v1/chatflows?type=AGENTFLOW", + headers=headers, + ) + models = r.json() + return [ + { + "id": model["id"], + "name": model["name"], + } + for model in models + ] + + except Exception as e: + return [ + { + "id": "error", + "name": str(e), + }, + ] + else: + return [ + { + "id": "error", + "name": "API Key not provided.", + }, + ] + + def _process_message_content(self, message: dict) -> str: + """Process message content, handling text for now""" + if isinstance(message.get("content"), list): + processed_content = [] + for item in message["content"]: + if item["type"] == "text": + processed_content.append(item["text"]) + return " ".join(processed_content) + 
return message.get("content", "") + + def pipe( + self, body: dict, __user__: Optional[dict] = None, __metadata__: dict = None + ): + try: + stream_enabled = body.get("stream", True) + session_id = (__metadata__ or {}).get("chat_id") or "owui-session" + # model can be "flowise." or just "" + model_name = body.get("model", "") + dot = model_name.find(".") + model_id = model_name[dot + 1 :] if dot != -1 else model_name + + messages = body.get("messages") or [] + if not messages: + raise Exception("No messages found in request body") + question = self._process_message_content(messages[-1]) + + data = { + "question": question, + "overrideConfig": {"sessionId": session_id}, + "streaming": stream_enabled, + } + + headers = { + "Authorization": f"Bearer {self.valves.flowise_api_key}", + "Content-Type": "application/json", + "Accept": "text/event-stream" if stream_enabled else "application/json", + } + + url = f"{self.valves.flowise_url}/api/v1/prediction/{model_id}" + with requests.post( + url, json=data, headers=headers, stream=stream_enabled, timeout=60 + ) as r: + r.raise_for_status() + + if stream_enabled: + # Ensure correct decoding for SSE (prevents ’ etc.) 
+ r.encoding = "utf-8" + + for raw_line in r.iter_lines(decode_unicode=True): + if not raw_line: + continue + line = raw_line.strip() + + # Skip keep-alives or non-data fields + if not line.startswith("data:"): + continue + + payload = line[5:].strip() + if payload in ("[DONE]", '"[DONE]"'): + break + + # Flowise usually sends {"event":"token","data":"..."} + try: + obj = json.loads(payload) + except json.JSONDecodeError: + # Occasionally plain text arrives—stream it anyway + if payload: + yield payload + continue + + if isinstance(obj, dict): + if obj.get("event") == "token": + token = obj.get("data") or "" + if token: + yield token + else: + # Some versions send {"data":{"text":"..."}} + data_field = obj.get("data") + if isinstance(data_field, dict): + text = data_field.get("text") + if text: + yield text + return # end streaming + + # Non-streaming fallback + resp = r.json() + return ( + resp.get("text") or (resp.get("data") or {}).get("text", "") or "" + ) + + except requests.HTTPError as http_err: + try: + detail = http_err.response.text[:500] + except Exception: + detail = "" + return f"HTTP error from Flowise: {http_err.response.status_code} {detail}" + except Exception as e: + return f"Error in Flowise pipe: {e}" +``` +```markdown +4. Après avoir enregistré la fonction, activez-la et cliquez sur le bouton des paramètres pour entrer votre URL Flowise et votre clé API Flowise : + +
+ +
+ +5. Maintenant, lorsque vous actualisez et cliquez sur Nouveau Chat, vous pourrez voir la liste des flux. Vous pouvez modifier le code pour afficher : + +* Seulement les Agentflows V2 : `f"{self.valves.flowise_url}/api/v1/chatflows?type=AGENTFLOW"` +* Seulement les Chatflows : `f"{self.valves.flowise_url}/api/v1/chatflows?type=CHATFLOW"` +* Seulement les Assistants : `f"{self.valves.flowise_url}/api/v1/chatflows?type=ASSISTANT"` + +
+ +6. Test : + +
+``` \ No newline at end of file diff --git a/fr/integrations/3rd-party-platform-integration/streamlit.md b/fr/integrations/3rd-party-platform-integration/streamlit.md new file mode 100644 index 00000000..f835a3d7 --- /dev/null +++ b/fr/integrations/3rd-party-platform-integration/streamlit.md @@ -0,0 +1,71 @@ +# Streamlit + +[Python SDK](https://github.com/FlowiseAI/FlowisePy) peut être utilisé pour créer une [Streamlit](https://streamlit.io/) application : + +```python +import streamlit as st +from flowise import Flowise, PredictionData +import json + +# Flowise app base url +base_url = st.secrets["APP_URL"] or "https://your-flowise-url.com" + +# Chatflow/Agentflow ID +flow_id = st.secrets["FLOW_ID"] or "abc" + +# Show title and description. +st.title("💬 Flowise Streamlit Chat") +st.write( + "This is a simple chatbot that uses Flowise Python SDK" +) + +# Create a Flowise client. +client = Flowise(base_url=base_url) + +# Create a session state variable to store the chat messages. This ensures that the +# messages persist across reruns. +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Display the existing chat messages via `st.chat_message`. +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +def generate_response(prompt: str): + print('generating response') + completion = client.create_prediction( + PredictionData( + chatflowId=flow_id, + question=prompt, + overrideConfig={ + "sessionId": "session1234" + }, + streaming=True + ) + ) + + for chunk in completion: + print(chunk) + parsed_chunk = json.loads(chunk) + if (parsed_chunk['event'] == 'token' and parsed_chunk['data'] != ''): + yield str(parsed_chunk['data']) + +# Create a chat input field to allow the user to enter a message. This will display +# automatically at the bottom of the page. +if prompt := st.chat_input("What is up?"): + + # Store and display the current prompt. 
+ st.session_state.messages.append({"role": "user", "content": prompt}) + with st.chat_message("user"): + st.markdown(prompt) + + # Stream the response to the chat using `st.write_stream`, then store it in + # session state. + with st.chat_message("assistant"): + response = generate_response(prompt) + full_response = st.write_stream(response) + st.session_state.messages.append({"role": "assistant", "content": full_response}) +``` + +Github Repo: [https://github.com/HenryHengZJ/flowise-streamlit](https://github.com/HenryHengZJ/flowise-streamlit) diff --git a/fr/integrations/3rd-party-platform-integration/zapier-zaps.md b/fr/integrations/3rd-party-platform-integration/zapier-zaps.md new file mode 100644 index 00000000..53fb4166 --- /dev/null +++ b/fr/integrations/3rd-party-platform-integration/zapier-zaps.md @@ -0,0 +1,104 @@ +--- +description: Apprenez à intégrer Flowise et Zapier +--- + +# Zapier Zaps + +*** + +## Prérequis + +1. [Connectez-vous](https://zapier.com/app/login) ou [inscrivez-vous](https://zapier.com/sign-up) à Zapier +2. Consultez [le déploiement](../../configuration/deployment/) pour créer une version hébergée dans le cloud de Flowise. + +## Configuration + +1. Allez sur [Zapier Zaps](https://zapier.com/app/zaps) +2. Cliquez sur **Créer** + +
+ +### Recevoir le message de déclenchement + +1. Cliquez ou recherchez **Discord** + +
+2. Sélectionnez **Nouveau message posté dans le canal** comme événement, puis cliquez sur **Continuer** + +
+3. **Connectez-vous** à votre compte Discord + +
+4. Ajoutez le **Bot Zapier** à votre serveur préféré + +
+5. Accordez les autorisations appropriées et cliquez sur **Autoriser**, puis cliquez sur **Continuer** + +
+ +
+6. Sélectionnez votre **canal préféré** pour interagir avec le Bot Zapier, puis cliquez sur **Continuer** + +
+7. **Envoyez un message** à votre canal sélectionné à l'étape 8 + +
+8. Cliquez sur **Tester le déclencheur** + +
+9. Sélectionnez votre message, puis cliquez sur **Continuer avec l'enregistrement sélectionné** + +
+ +### Filtrer le message du Bot Zapier + +1. Cliquez ou recherchez **Filtre** + +
+2. Configurez le **Filtre** pour ne pas continuer si le message reçu provient du **Bot Zapier**, puis cliquez sur **Continuer** + +
+ +### FlowiseAI génère le message de résultat + +1. Cliquez sur **+**, cliquez ou recherchez **FlowiseAI** + +
+2. Sélectionnez **Faire une prédiction** comme événement, puis cliquez sur **Continuer** + +
+3. Cliquez sur **Se connecter** et saisissez vos informations, puis cliquez sur **Oui, continuer vers FlowiseAI** + +
+ +
+4. Sélectionnez **Contenu** de Discord et votre ID Flow, puis cliquez sur **Continuer** + +
+5. Cliquez sur **Tester l'action** et attendez votre résultat + +
+ +### Envoyer un message de résultat + +1. Cliquez sur **+**, cliquez ou recherchez **Discord** + +
+2. Sélectionnez **Envoyer un message dans le canal** comme événement, puis cliquez sur **Continuer** + +
+3. Sélectionnez le compte Discord auquel vous êtes connecté, puis cliquez sur **Continuer** + +
+4. Sélectionnez votre canal préféré pour le canal et choisissez **Texte** et **Source de chaîne** (si disponible) de FlowiseAI pour le texte du message, puis cliquez sur **Continuer** + +
+5. Cliquez sur **Tester l'action** + +
+6. Voilà [🎉](https://emojipedia.org/party-popper/) vous devriez voir le message arriver dans votre canal Discord + +
+7. Enfin, renommez votre Zap et publiez-le + +
\ No newline at end of file diff --git a/fr/integrations/README.md b/fr/integrations/README.md new file mode 100644 index 00000000..94a07df3 --- /dev/null +++ b/fr/integrations/README.md @@ -0,0 +1,50 @@ +--- +description: Learn about all available integrations / nodes in Flowise +--- + +# Integrations + +*** + +In Flowise, nodes are referred to as integrations. Similar to LEGO, you can build a customized LLM ochestration flow, a chatbot, an agent with all the integrations available in Flowise. + +### LangChain + +* [Agents](langchain/agents/) +* [Cache](langchain/cache/) +* [Chains](langchain/chains/) +* [Chat Models](langchain/chat-models/) +* [Document Loaders](langchain/document-loaders/) +* [Embeddings](langchain/embeddings/) +* [LLMs](langchain/llms/) +* [Memory](langchain/memory/) +* [Moderation](langchain/moderation/) +* [Output Parsers](langchain/output-parsers/) +* [Prompts](langchain/prompts/) +* [Record Managers](langchain/record-managers.md) +* [Retrievers](langchain/retrievers/) +* [Text Splitters](langchain/text-splitters/) +* [Tools](langchain/tools/) +* [Vector Stores](langchain/vector-stores/) + +### LlamaIndex + +* [Agents](llamaindex/agents/) +* [Chat Models](llamaindex/chat-models/) +* [Embeddings](llamaindex/embeddings/) +* [Engine](llamaindex/engine/) +* [Response Synthesizer](llamaindex/response-synthesizer/) +* [Tools](llamaindex/tools/) +* [Vector Stores](llamaindex/vector-stores/) + +### Utilities + +* [Custom JS Function](utilities/custom-js-function.md) +* [Set/Get Variable](utilities/set-get-variable.md) +* [If Else](utilities/if-else.md) +* [Set Variable](broken-reference) +* [Sticky Note](utilities/sticky-note.md) + +### External Integrations + +* [Zapier Zaps](3rd-party-platform-integration/zapier-zaps.md) diff --git a/fr/integrations/langchain/README.md b/fr/integrations/langchain/README.md new file mode 100644 index 00000000..9e8ce800 --- /dev/null +++ b/fr/integrations/langchain/README.md @@ -0,0 +1,30 @@ +--- +description: Learn 
how Flowise integrates with the LangChain framework
---

# LangChain

***

[**LangChain**](https://www.langchain.com/) est un cadre pour développer des applications alimentées par des modèles de langue. Il simplifie le processus de création d'applications d'IA générative, de connexion des sources de données, des vecteurs et de la mémoire avec les LLM.

Flowise complète LangChain en offrant une interface visuelle. Ici, les nœuds sont organisés en sections distinctes, ce qui facilite la construction de workflows.

### Sections de LangChain :

* [Agents](agents/)
* [Cache](cache/)
* [Chains](chains/)
* [Chat Models](chat-models/)
* [Document Loaders](document-loaders/)
* [Embeddings](embeddings/)
* [LLMs](llms/)
* [Memory](memory/)
* [Moderation](moderation/)
* [Output Parsers](output-parsers/)
* [Prompts](prompts/)
* [Record Managers](record-managers.md)
* [Retrievers](retrievers/)
* [Text Splitters](text-splitters/)
* [Tools](tools/)
* [Vector Stores](vector-stores/) diff --git a/fr/integrations/langchain/agents/README.md b/fr/integrations/langchain/agents/README.md new file mode 100644 index 00000000..8ced6389 --- /dev/null +++ b/fr/integrations/langchain/agents/README.md @@ -0,0 +1,28 @@ +--- +description: Nœuds d'Agent LangChain +--- + +# Agents + +*** + +À eux seuls, les modèles de langage ne peuvent pas agir - ils se contentent de produire du texte. + +Les agents sont des systèmes qui utilisent un LLM comme moteur de raisonnement pour déterminer quelles actions entreprendre et quels devraient être les inputs de ces actions. Les résultats de ces actions peuvent ensuite être réintroduits dans l'agent, qui détermine si d'autres actions sont nécessaires ou s'il est acceptable de terminer. 
+ +### Nœuds d'Agent : + +* [Agent Airtable](airtable-agent.md) +* [AutoGPT](autogpt.md) +* [BabyAGI](babyagi.md) +* [Agent CSV](csv-agent.md) +* [Agent Conversationnel](conversational-agent.md) +* [Agent de Récupération Conversationnelle](conversational-retrieval-agent.md) +* [Agent Outil MistralAI](mistralai-tool-agent.md) +* [Assistant OpenAI](openai-assistant/) +* [Agent Fonction OpenAI](openai-function-agent.md) +* [Agent Outil OpenAI](../../llamaindex/agents/openai-tool-agent.md) +* [Chat d'Agent ReAct](react-agent-chat.md) +* [LLM d'Agent ReAct](react-agent-llm.md) +* [Agent Outil](tool-agent.md) +* [Agent XML](xml-agent.md) \ No newline at end of file diff --git a/fr/integrations/langchain/agents/airtable-agent.md b/fr/integrations/langchain/agents/airtable-agent.md new file mode 100644 index 00000000..4e6ecd1e --- /dev/null +++ b/fr/integrations/langchain/agents/airtable-agent.md @@ -0,0 +1,34 @@ +--- +description: Agent utilisé pour répondre aux requêtes sur la table Airtable. +--- + +# Agent Airtable + +

Nœud de l'Agent Airtable

+ +## Fonctionnalité de l'Agent Airtable + +L'Agent Airtable est conçu pour faciliter les interactions entre Flowise AI et les tables Airtable, permettant aux utilisateurs de consulter les données d'Airtable de manière conversationnelle. En utilisant cet agent, les utilisateurs peuvent poser des questions sur le contenu de leur base Airtable et recevoir des réponses pertinentes basées sur les données stockées. Cela peut être particulièrement utile pour extraire rapidement des informations spécifiques, automatiser des flux de travail ou générer des résumés à partir des données stockées dans Airtable. + +Par exemple, l'Agent Airtable peut être utilisé pour répondre à des questions telles que : + +* "Combien de tâches sont encore incomplètes dans ma table de suivi de projet ?" +* "Quels sont les détails de contact des clients listés dans le CRM ?" +* "Donnez-moi un résumé de tous les enregistrements ajoutés au cours de la semaine dernière." + +Cette fonctionnalité aide les utilisateurs à obtenir des informations de leurs bases Airtable sans avoir besoin de naviguer dans l'interface Airtable, rendant ainsi la gestion et l'analyse de leurs données plus faciles et interactives. + +## Entrées + +L'Agent Airtable nécessite les entrées suivantes pour fonctionner efficacement : + +* **Modèle de Langue** : Le modèle de langue à utiliser pour traiter les requêtes. Cette entrée est requise et aide à déterminer la qualité et l'exactitude des réponses fournies par l'agent. +* **Modération des Entrées** : Entrée optionnelle qui permet la modération du contenu. Cela aide à garantir que les requêtes sont appropriées et ne contiennent pas de contenu offensant ou nuisible. +* **Identifiant de Connexion** : Entrée requise pour se connecter à Airtable. Les utilisateurs doivent sélectionner l'identifiant approprié qui a les permissions pour accéder à leurs données Airtable. +* **ID de Base** : L'ID de la base Airtable à laquelle se connecter. 
C'est un champ requis et peut être trouvé dans la documentation API d'Airtable ou dans les paramètres de la base. Si l'URL de votre table ressemble à `https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYlCO/viw9UrP77idOCE4ee`, `app11RobdGoX0YNsC` est l'ID de la Base. Il est utilisé pour spécifier quelle base Airtable contient les données à interroger. +* **ID de Table** : L'ID de la table spécifique au sein de la base Airtable. C'est également un champ requis et aide l'agent à cibler la bonne table pour la récupération des données. Dans l'URL d'exemple `https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYlCO/viw9UrP77idOCE4ee`, `tblJdmvbrgizbYlCO` est l'ID de la Table. +* **Paramètres Supplémentaires** : Paramètres optionnels qui peuvent être utilisés pour personnaliser le comportement de l'agent. Ces paramètres peuvent être configurés en fonction de cas d'utilisation spécifiques. + * **Retourner Tout** : Cette option permet aux utilisateurs de retourner tous les enregistrements de la table spécifiée. Si activée, tous les enregistrements seront récupérés, sinon, seul un nombre limité sera retourné. + * **Limite** : Spécifie le nombre maximum d'enregistrements à retourner si **Retourner Tout** n'est pas activé. La valeur par défaut est `100`. + +**Remarque** : Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez fournir pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. \ No newline at end of file diff --git a/fr/integrations/langchain/agents/autogpt.md b/fr/integrations/langchain/agents/autogpt.md new file mode 100644 index 00000000..638cc912 --- /dev/null +++ b/fr/integrations/langchain/agents/autogpt.md @@ -0,0 +1,11 @@ +--- +description: Agent autonome avec chaîne de pensées pour l'accomplissement autonome des tâches. +--- + +# AutoGPT + +

Nœud AutoGPT

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/babyagi.md b/fr/integrations/langchain/agents/babyagi.md new file mode 100644 index 00000000..ee735776 --- /dev/null +++ b/fr/integrations/langchain/agents/babyagi.md @@ -0,0 +1,13 @@ +--- +description: >- + Agent autonome orienté tâches qui crée de nouvelles tâches et réorganise la liste des tâches + en fonction des objectifs +--- + +# BabyAGI + +

Nœud BabyAGI

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/conversational-agent.md b/fr/integrations/langchain/agents/conversational-agent.md new file mode 100644 index 00000000..44625ff7 --- /dev/null +++ b/fr/integrations/langchain/agents/conversational-agent.md @@ -0,0 +1,11 @@ +--- +description: Agent conversationnel pour un modèle de chat. Il utilisera des invites spécifiques au chat. +--- + +# Agent Conversationnel + +

Noeud d'Agent Conversationnel

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/conversational-retrieval-agent.md b/fr/integrations/langchain/agents/conversational-retrieval-agent.md new file mode 100644 index 00000000..c3ee1d75 --- /dev/null +++ b/fr/integrations/langchain/agents/conversational-retrieval-agent.md @@ -0,0 +1,7 @@ +--- +description: Dépréciation de Node. +--- + +# Agent de Récupération Conversationnel + +
\ No newline at end of file diff --git a/fr/integrations/langchain/agents/csv-agent.md b/fr/integrations/langchain/agents/csv-agent.md new file mode 100644 index 00000000..cec1eaca --- /dev/null +++ b/fr/integrations/langchain/agents/csv-agent.md @@ -0,0 +1,11 @@ +--- +description: Agent utilisé pour répondre aux requêtes sur les données CSV. +--- + +# Agent CSV + +

Noeud de l'Agent CSV

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/mistralai-tool-agent.md b/fr/integrations/langchain/agents/mistralai-tool-agent.md new file mode 100644 index 00000000..6eeebee0 --- /dev/null +++ b/fr/integrations/langchain/agents/mistralai-tool-agent.md @@ -0,0 +1,7 @@ +--- +description: Dépréciation de Node. +--- + +# Agent Outil MistralAI + +
\ No newline at end of file diff --git a/fr/integrations/langchain/agents/openai-assistant/README.md b/fr/integrations/langchain/agents/openai-assistant/README.md new file mode 100644 index 00000000..19bf00eb --- /dev/null +++ b/fr/integrations/langchain/agents/openai-assistant/README.md @@ -0,0 +1,11 @@ +--- +description: Un agent qui utilise l'API OpenAI Assistant pour choisir l'outil et les arguments à appeler. +--- + +# OpenAI Assistant + +

OpenAI Assistant

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](../../../../contributing/) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/openai-assistant/threads.md b/fr/integrations/langchain/agents/openai-assistant/threads.md new file mode 100644 index 00000000..e2d619c2 --- /dev/null +++ b/fr/integrations/langchain/agents/openai-assistant/threads.md @@ -0,0 +1,33 @@ +# Threads + +[Threads](https://platform.openai.com/docs/assistants/how-it-works/managing-threads-and-messages) n'est utilisé que lorsqu'un Assistant OpenAI est en cours d'utilisation. Il s'agit d'une session de conversation entre un Assistant et un utilisateur. Les threads stockent les messages et gèrent automatiquement la troncature pour adapter le contenu au contexte d'un modèle. + +
+ +## Conversations séparées pour plusieurs utilisateurs + +### UI & Chat intégré + +Par défaut, l'UI et le Chat intégré sépareront automatiquement les threads pour les conversations de plusieurs utilisateurs. Cela se fait en générant un **`chatId`** unique pour chaque nouvelle interaction. Cette logique est gérée en arrière-plan par Flowise. + +### API de prédiction + +POST /`api/v1/prediction/{your-chatflowid}`, spécifiez le **`chatId`**. Le même thread sera utilisé pour le même chatId. + +```json +{ + "question": "hello!", + "chatId": "user1" +} +``` + +### Message API + +* GET `/api/v1/chatmessage/{your-chatflowid}` +* DELETE `/api/v1/chatmessage/{your-chatflowid}` + +Vous pouvez également filtrer via **`chatId` -** `/api/v1/chatmessage/{your-chatflowid}?chatId={your-chatid}` + +Toutes les conversations peuvent également être visualisées et gérées depuis l'interface utilisateur : + +
diff --git a/fr/integrations/langchain/agents/openai-function-agent.md b/fr/integrations/langchain/agents/openai-function-agent.md new file mode 100644 index 00000000..c7fa832d --- /dev/null +++ b/fr/integrations/langchain/agents/openai-function-agent.md @@ -0,0 +1,7 @@ +--- +description: Dépréciation de Node. +--- + +# Agent de Fonction OpenAI + +
\ No newline at end of file diff --git a/fr/integrations/langchain/agents/openai-tool-agent.md b/fr/integrations/langchain/agents/openai-tool-agent.md new file mode 100644 index 00000000..eddbfb60 --- /dev/null +++ b/fr/integrations/langchain/agents/openai-tool-agent.md @@ -0,0 +1,7 @@ +--- +description: Dépréciation de Node. +--- + +# Agent d'outil OpenAI + +
\ No newline at end of file diff --git a/fr/integrations/langchain/agents/react-agent-chat.md b/fr/integrations/langchain/agents/react-agent-chat.md new file mode 100644 index 00000000..9599a251 --- /dev/null +++ b/fr/integrations/langchain/agents/react-agent-chat.md @@ -0,0 +1,11 @@ +# ReAct Agent Chat + +Agent qui utilise la logique [ReAct](https://react-lm.github.io/) (Raisonnement et Action) pour décider quelle action entreprendre, optimisé pour être utilisé avec des modèles de chat. + +
+ +

Noeud de Chat de l'Agent ReAct

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez fournir pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/react-agent-llm.md b/fr/integrations/langchain/agents/react-agent-llm.md new file mode 100644 index 00000000..17c6f758 --- /dev/null +++ b/fr/integrations/langchain/agents/react-agent-llm.md @@ -0,0 +1,11 @@ +# ReAct Agent LLM + +Agent qui utilise la logique [ReAct](https://react-lm.github.io/) (Raisonnement et Action) pour décider quelle action entreprendre, optimisé pour être utilisé avec des modèles non conversationnels. + +
+ +

Nœud de l'Agent ReAct LLM

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/tool-agent.md b/fr/integrations/langchain/agents/tool-agent.md new file mode 100644 index 00000000..3ad73da5 --- /dev/null +++ b/fr/integrations/langchain/agents/tool-agent.md @@ -0,0 +1,11 @@ +--- +description: Agent qui utilise l'appel de fonction pour choisir les outils et les arguments à appeler. +--- + +# Agent d'Outils + +

Noeud de l'Agent d'Outils

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/agents/xml-agent.md b/fr/integrations/langchain/agents/xml-agent.md new file mode 100644 index 00000000..c2b76e05 --- /dev/null +++ b/fr/integrations/langchain/agents/xml-agent.md @@ -0,0 +1,13 @@ +--- +description: >- + Agent conçu pour les LLMs qui sont performants en raisonnement/rédaction XML (par ex : + Anthropic Claude). +--- + +# Agent XML + +

Noeud de l'Agent XML

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/cache/README.md b/fr/integrations/langchain/cache/README.md new file mode 100644 index 00000000..19a521b1 --- /dev/null +++ b/fr/integrations/langchain/cache/README.md @@ -0,0 +1,18 @@ +--- +description: Nœuds de Cache LangChain +--- + +# Cache + +*** + +Le caching peut vous faire économiser de l'argent en réduisant le nombre d'appels API que vous effectuez auprès du fournisseur LLM, si vous demandez souvent la même complétion plusieurs fois. Cela peut accélérer votre application en diminuant le nombre d'appels API que vous faites au fournisseur LLM. + +### Nœuds de Cache : + +* [Cache Memoire](in-memory-cache.md) +* [Cache Integrée Memoire](inmemory-embedding-cache.md) +* [Cache Momento](momento-cache.md) +* [Cache Redis](redis-cache.md) +* [Cache Integrée Redis](redis-embeddings-cache.md) +* [Cache Redis Upstash](upstash-redis-cache.md) \ No newline at end of file diff --git a/fr/integrations/langchain/cache/in-memory-cache.md b/fr/integrations/langchain/cache/in-memory-cache.md new file mode 100644 index 00000000..919c9bc1 --- /dev/null +++ b/fr/integrations/langchain/cache/in-memory-cache.md @@ -0,0 +1,11 @@ +--- +description: Met en cache la réponse LLM dans la mémoire locale, sera effacée lorsque l'application sera redémarrée. +--- + +# Cache InMemory + +

Noeud de Cache InMemory

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/cache/inmemory-embedding-cache.md b/fr/integrations/langchain/cache/inmemory-embedding-cache.md new file mode 100644 index 00000000..b604abf0 --- /dev/null +++ b/fr/integrations/langchain/cache/inmemory-embedding-cache.md @@ -0,0 +1,11 @@ +--- +description: Mettre en cache les embeddings générés en mémoire pour éviter de devoir les recalculer. +--- + +# Cache d'Embedding en Mémoire + +

Noeud de Cache d'Embedding en Mémoire

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/cache/momento-cache.md b/fr/integrations/langchain/cache/momento-cache.md new file mode 100644 index 00000000..24956a4f --- /dev/null +++ b/fr/integrations/langchain/cache/momento-cache.md @@ -0,0 +1,11 @@ +--- +description: Mettre en cache la réponse LLM en utilisant Momento, un cache distribué et sans serveur. +--- + +# Cache Momento + +

Noeud de Cache Momento

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/cache/redis-cache.md b/fr/integrations/langchain/cache/redis-cache.md new file mode 100644 index 00000000..4b16ca7c --- /dev/null +++ b/fr/integrations/langchain/cache/redis-cache.md @@ -0,0 +1,13 @@ +--- +description: >- + Mettre en cache la réponse LLM dans Redis, utile pour partager le cache + entre plusieurs processus ou serveurs. +--- + +# Cache Redis + +

Noeud de Cache Redis

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez fournir pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/cache/redis-embeddings-cache.md b/fr/integrations/langchain/cache/redis-embeddings-cache.md new file mode 100644 index 00000000..d1ddf949 --- /dev/null +++ b/fr/integrations/langchain/cache/redis-embeddings-cache.md @@ -0,0 +1,13 @@ +--- +description: >- + Mettre en cache la réponse LLM dans Redis, utile pour partager le cache entre plusieurs + processus ou serveurs. +--- + +# Cache d'Embeddings Redis + +

Noeud de Cache d'Embeddings Redis

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/cache/upstash-redis-cache.md b/fr/integrations/langchain/cache/upstash-redis-cache.md new file mode 100644 index 00000000..b10cfcdc --- /dev/null +++ b/fr/integrations/langchain/cache/upstash-redis-cache.md @@ -0,0 +1,11 @@ +--- +description: Mise en cache des réponses LLM dans Upstash Redis, données sans serveur pour Redis et Kafka. +--- + +# Cache Upstash Redis + +

Noeud de cache Upstash Redis

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/README.md b/fr/integrations/langchain/chains/README.md new file mode 100644 index 00000000..03e48d94 --- /dev/null +++ b/fr/integrations/langchain/chains/README.md @@ -0,0 +1,34 @@ +--- +description: Nœuds de chaîne LangChain +--- + +# Chaînes + +*** + +Dans le contexte des chatbots et des grands modèles de langage, les "chaînes" désignent généralement des séquences de texte ou de tours de conversation. Ces chaînes sont utilisées pour stocker et gérer l'historique de la conversation et le contexte pour le chatbot ou le modèle de langage. Les chaînes aident le modèle à comprendre la conversation en cours et à fournir des réponses cohérentes et contextuellement pertinentes. + +Voici comment fonctionnent les chaînes : + +1. **Historique de la conversation** : Lorsque un utilisateur interagit avec un chatbot ou un modèle de langage, la conversation est souvent représentée comme une série de messages texte ou de tours de conversation. Chaque message de l'utilisateur et du modèle est stocké dans l'ordre chronologique pour maintenir le contexte de la conversation. +2. **Entrée et sortie** : Chaque chaîne se compose à la fois de l'entrée de l'utilisateur et de la sortie du modèle. L'entrée de l'utilisateur est généralement appelée la "chaîne d'entrée", tandis que les réponses du modèle sont stockées dans la "chaîne de sortie". Cela permet au modèle de se référer aux messages précédents dans la conversation. +3. 
**Compréhension contextuelle** : En préservant l'ensemble de l'historique de la conversation dans ces chaînes, le modèle peut comprendre le contexte et se référer à des messages antérieurs pour fournir des réponses cohérentes et contextuellement pertinentes. Cela est crucial pour maintenir une conversation naturelle et significative avec les utilisateurs. +4. **Longueur maximale** : Les chaînes ont une longueur maximale pour gérer l'utilisation de la mémoire et les ressources informatiques. Lorsqu'une chaîne devient trop longue, des messages plus anciens peuvent être supprimés ou tronqués pour faire de la place pour de nouveaux messages. Cela peut potentiellement entraîner une perte de contexte si des détails importants de la conversation sont supprimés. +5. **Continuation de la conversation** : Dans une interaction en temps réel avec un chatbot ou un modèle de langage, la chaîne d'entrée est continuellement mise à jour avec les nouveaux messages de l'utilisateur, et la chaîne de sortie est mise à jour avec les réponses du modèle. Cela permet au modèle de suivre la conversation en cours et de répondre de manière appropriée. + +Les chaînes sont un concept fondamental dans la construction et le maintien des conversations des chatbots et des modèles de langage. Elles garantissent que le modèle a accès au contexte dont il a besoin pour générer des réponses significatives et conscientes du contexte, rendant l'interaction plus engageante et utile pour les utilisateurs. 
+ +### Nœuds de chaîne : + +* [GET API Chain](get-api-chain.md) +* [OpenAPI Chain](openapi-chain.md) +* [POST API Chain](post-api-chain.md) +* [Conversation Chain](conversation-chain.md) +* [Conversational Retrieval QA Chain](conversational-retrieval-qa-chain.md) +* [LLM Chain](llm-chain.md) +* [Multi Prompt Chain](multi-prompt-chain.md) +* [Multi Retrieval QA Chain](multi-retrieval-qa-chain.md) +* [Retrieval QA Chain](retrieval-qa-chain.md) +* [Sql Database Chain](sql-database-chain.md) +* [Vectara QA Chain](vectara-chain.md) +* [VectorDB QA Chain](vectordb-qa-chain.md) \ No newline at end of file diff --git a/fr/integrations/langchain/chains/conversation-chain.md b/fr/integrations/langchain/chains/conversation-chain.md new file mode 100644 index 00000000..a85ae1d7 --- /dev/null +++ b/fr/integrations/langchain/chains/conversation-chain.md @@ -0,0 +1,11 @@ +--- +description: Modèles de chat pour une chaîne de conversation spécifique avec mémoire. +--- + +# Chaîne de Conversation + +

Noeud de la Chaîne de Conversation

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/conversational-retrieval-qa-chain.md b/fr/integrations/langchain/chains/conversational-retrieval-qa-chain.md new file mode 100644 index 00000000..86873233 --- /dev/null +++ b/fr/integrations/langchain/chains/conversational-retrieval-qa-chain.md @@ -0,0 +1,31 @@ +# Conversational Retrieval QA Chain + +A chain for performing question-answering tasks with a retrieval component. + +
+ +## Definitions + +**A retrieval-based question-answering chain**, which integrates with a retrieval component and allows you to configure input parameters and perform question-answering tasks.\ +**Retrieval-Based Chatbots:** Retrieval-based chatbots are chatbots that generate responses by selecting pre-defined responses from a database or a set of possible responses. They "retrieve" the most appropriate response based on the input from the user.\ +**QA (Question Answering):** QA systems are designed to answer questions posed in natural language. They typically involve understanding the question and searching for or generating an appropriate answer. + +## Inputs + +* [Language Model](../chat-models/) +* [Vector Store Retriever](../vector-stores/) +* [Memory (optional)](../memory/) + +## Parameters + +| Name | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Return Source Documents | To return citations/sources that were used to build up the response | +| System Message | An instruction for LLM on how to answer query | +| Chain Option | Method on how to summarize, answer questions, and extract information from documents. 
Read [more](https://js.langchain.com/docs/modules/chains/document/) | + +## Outputs + +| Name | Description | +| ------------------------------ | ----------------------------- | +| ConversationalRetrievalQAChain | Final node to return response | diff --git a/fr/integrations/langchain/chains/get-api-chain.md b/fr/integrations/langchain/chains/get-api-chain.md new file mode 100644 index 00000000..a454a765 --- /dev/null +++ b/fr/integrations/langchain/chains/get-api-chain.md @@ -0,0 +1 @@ +fr/integrations/langchain/chains/conversation-chain.md fr/integrations/langchain/chains/conversational-retrieval-qa-chain.md fr/integrations/langchain/chains/get-api-chain.md fr/integrations/langchain/chains/llm-chain.md fr/integrations/langchain/chains/multi-prompt-chain.md fr/integrations/langchain/chains/multi-retrieval-qa-chain.md fr/integrations/langchain/chains/openapi-chain.md fr/integrations/langchain/chains/post-api-chain.md fr/integrations/langchain/chains/README.md fr/integrations/langchain/chains/retrieval-qa-chain.md fr/integrations/langchain/chains/sql-database-chain.md fr/integrations/langchain/chains/vectara-chain.md fr/integrations/langchain/chains/vectordb-qa-chain.md \ No newline at end of file diff --git a/fr/integrations/langchain/chains/llm-chain.md b/fr/integrations/langchain/chains/llm-chain.md new file mode 100644 index 00000000..9c98e517 --- /dev/null +++ b/fr/integrations/langchain/chains/llm-chain.md @@ -0,0 +1,11 @@ +--- +description: Chaîne pour exécuter des requêtes contre des LLM. +--- + +# Chaîne LLM + +

Noeud de la Chaîne LLM

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} diff --git a/fr/integrations/langchain/chains/multi-prompt-chain.md b/fr/integrations/langchain/chains/multi-prompt-chain.md new file mode 100644 index 00000000..4f9634d7 --- /dev/null +++ b/fr/integrations/langchain/chains/multi-prompt-chain.md @@ -0,0 +1,12 @@ +--- +description: >- + Chain sélectionne automatiquement un prompt approprié parmi plusieurs modèles de prompt. +--- + +# Chaîne de Prompts Multiples + +

Noeud de Chaîne de Prompts Multiples

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/multi-retrieval-qa-chain.md b/fr/integrations/langchain/chains/multi-retrieval-qa-chain.md new file mode 100644 index 00000000..b681befb --- /dev/null +++ b/fr/integrations/langchain/chains/multi-retrieval-qa-chain.md @@ -0,0 +1,13 @@ +--- +description: >- + Chaîne QA qui sélectionne automatiquement un magasin de vecteurs approprié parmi plusieurs + récupérateurs. +--- + +# Chaîne QA de Multi Récupération + +

Noeud de la Chaîne QA de Multi Récupération

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/openapi-chain.md b/fr/integrations/langchain/chains/openapi-chain.md new file mode 100644 index 00000000..49c041df --- /dev/null +++ b/fr/integrations/langchain/chains/openapi-chain.md @@ -0,0 +1,11 @@ +--- +description: Chaîne qui sélectionne et appelle automatiquement des API uniquement sur la base d'une spécification OpenAPI. +--- + +# Chaîne OpenAPI + +

Noeud de la Chaîne OpenAPI

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/post-api-chain.md b/fr/integrations/langchain/chains/post-api-chain.md new file mode 100644 index 00000000..a97b9ea8 --- /dev/null +++ b/fr/integrations/langchain/chains/post-api-chain.md @@ -0,0 +1,11 @@ +--- +description: Chaîne pour exécuter des requêtes contre l'API POST. +--- + +# Chaîne API POST + +

Noeud de la chaîne API POST

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/retrieval-qa-chain.md b/fr/integrations/langchain/chains/retrieval-qa-chain.md new file mode 100644 index 00000000..380ec7b8 --- /dev/null +++ b/fr/integrations/langchain/chains/retrieval-qa-chain.md @@ -0,0 +1,11 @@ +--- +description: Chaîne QA pour répondre à une question basée sur les documents récupérés. +--- + +# Chaîne QA de Récupération + +

Noeud de la Chaîne QA de Récupération

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/sql-database-chain.md b/fr/integrations/langchain/chains/sql-database-chain.md new file mode 100644 index 00000000..a4a374b5 --- /dev/null +++ b/fr/integrations/langchain/chains/sql-database-chain.md @@ -0,0 +1,11 @@ +--- +description: Répondre à des questions sur une base de données SQL. +--- + +# Chaîne de Base de Données SQL + +

Noeud de Chaîne de Base de Données SQL

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chains/vectara-chain.md b/fr/integrations/langchain/chains/vectara-chain.md new file mode 100644 index 00000000..2bc49cb1 --- /dev/null +++ b/fr/integrations/langchain/chains/vectara-chain.md @@ -0,0 +1,27 @@ +# Vectara QA Chain + +Une chaîne pour effectuer des tâches de question-réponse avec Vectara. + +
+ +## Définitions + +**Une chaîne de question-réponse basée sur la récupération**, qui s'intègre à un composant de récupération Vectara et vous permet de configurer les paramètres d'entrée et d'effectuer des tâches de question-réponse. + +## Entrées + +* [Vectara Store](../vector-stores/vectara.md) + +## Paramètres + +| Nom | Description | +| ---------------------- | ------------------------------------------------------------- | +| Nom du Prompt de Résumé | modèle à utiliser pour générer le résumé | +| Langue de la Réponse | langue souhaitée pour la réponse | +| Résultats Résumés Max | nombre de résultats principaux à utiliser dans le résumé (par défaut 7) | + +## Sorties + +| Nom | Description | +| ----------------- | ----------------------------- | +| VectaraQAChain | Noeud final pour retourner la réponse | \ No newline at end of file diff --git a/fr/integrations/langchain/chains/vectordb-qa-chain.md b/fr/integrations/langchain/chains/vectordb-qa-chain.md new file mode 100644 index 00000000..b637e09e --- /dev/null +++ b/fr/integrations/langchain/chains/vectordb-qa-chain.md @@ -0,0 +1,11 @@ +--- +description: Chaîne QA pour les bases de données vectorielles. +--- + +# Chaîne QA VectorDB + +

Noeud de la chaîne QA VectorDB

+
+{% hint style="info" %}
+Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer.
+{% endhint %}
\ No newline at end of file
diff --git a/fr/integrations/langchain/chat-models/README.md b/fr/integrations/langchain/chat-models/README.md
new file mode 100644
index 00000000..c62a36d5
--- /dev/null
+++ b/fr/integrations/langchain/chat-models/README.md
@@ -0,0 +1,30 @@
+---
+description: Nœuds de Modèles de Chat LangChain
+---
+
+# Modèles de Chat
+
+***
+
+Les modèles de chat prennent une liste de messages en entrée et renvoient un message généré par le modèle en sortie. Ces modèles, tels que **gpt-3.5-turbo** ou **gpt-4**, sont puissants et moins coûteux que leurs prédécesseurs, les modèles de Complétion tels que **text-davinci-003**.
+
+### Nœuds de Modèle de Chat :
+
+* [AWS ChatBedrock](aws-chatbedrock.md)
+* [Azure ChatOpenAI](../../llamaindex/chat-models/azurechatopenai.md)
+* [NIBittensorChat](broken-reference)
+* [ChatAnthropic](chatanthropic.md)
+* [ChatCohere](chatcohere.md)
+* [Chat Fireworks](chat-fireworks.md)
+* [ChatGoogleGenerativeAI](google-ai.md)
+* [ChatGooglePaLM](broken-reference)
+* [Google VertexAI](google-vertexai.md)
+* [ChatHuggingFace](chathuggingface.md)
+* [ChatLocalAI](chatlocalai.md)
+* [ChatMistralAI](mistral-ai.md)
+* [ChatOllama](chatollama.md)
+* [ChatOllama Function](broken-reference)
+* [ChatOpenAI](azure-chatopenai.md)
+* [ChatOpenAI Custom](broken-reference)
+* [ChatTogetherAI](chattogetherai.md)
+* [GroqChat](groqchat.md)
\ No newline at end of file
diff --git a/fr/integrations/langchain/chat-models/aws-chatbedrock.md b/fr/integrations/langchain/chat-models/aws-chatbedrock.md
new file mode 100644
index 00000000..866e1d53
--- /dev/null
+++ b/fr/integrations/langchain/chat-models/aws-chatbedrock.md
@@ -0,0 +1,11 @@
+---
+description: Wrapper autour des modèles de langage 
volumineux AWS Bedrock utilisant le point de terminaison Chat. +--- + +# AWS ChatBedrock + +

AWS ChatBedrock

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/azure-chatopenai-1.md b/fr/integrations/langchain/chat-models/azure-chatopenai-1.md new file mode 100644 index 00000000..866e1d53 --- /dev/null +++ b/fr/integrations/langchain/chat-models/azure-chatopenai-1.md @@ -0,0 +1,11 @@ +--- +description: Wrapper autour des modèles de langage volumineux AWS Bedrock utilisant le point de terminaison Chat. +--- + +# AWS ChatBedrock + +

AWS ChatBedrock

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/azure-chatopenai.md b/fr/integrations/langchain/chat-models/azure-chatopenai.md new file mode 100644 index 00000000..5de1d4f5 --- /dev/null +++ b/fr/integrations/langchain/chat-models/azure-chatopenai.md @@ -0,0 +1,64 @@ +# ChatOpenAI + +## Prerequisite + +1. An [OpenAI](https://openai.com/) account +2. Create an [API key](https://platform.openai.com/api-keys) + +## Setup + +1. **Chat Models** > drag **ChatOpenAI** node + +
+ +2. **Connect Credential** > click **Create New** + +
+ +2. Fill in the **ChatOpenAI** credential + +
+ +4. Voila [🎉](https://emojipedia.org/party-popper/), you can now use **ChatOpenAI node** in Flowise + +
+ +## Custom base URL and headers + +Flowise supports using custom base URL and headers for Chat OpenAI. Users can easily use integrations like OpenRouter, TogetherAI and others that support OpenAI API compatibility. + +### TogetherAI + +1. Refer to official [docs](https://docs.together.ai/docs/openai-api-compatibility#nodejs) from TogetherAI +2. Create a new credential with TogetherAI API key +3. Click **Additional Parameters** on ChatOpenAI node. +4. Change the Base Path: + +
+ +### Open Router + +1. Refer to official [docs](https://openrouter.ai/docs#quick-start) from OpenRouter +2. Create a new credential with OpenRouter API key +3. Click Additional Parameters on ChatOpenAI node +4. Change the Base Path and Base Options: + +
+ +## Custom Model + +For models that are not supported on ChatOpenAI node, you can use ChatOpenAI Custom for that. This allow users to fill in model name such as `mistralai/Mixtral-8x7B-Instruct-v0.1` + +
+ +## Image Upload + +You can also allow images to be uploaded and analyzed by LLM. Under the hood, Flowise will use [OpenAI Vison ](https://platform.openai.com/docs/guides/vision)model to process the image. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent. + +
+ +From the chat interface, you will now see a new image upload button: + +
+ +
diff --git a/fr/integrations/langchain/chat-models/chat-fireworks.md b/fr/integrations/langchain/chat-models/chat-fireworks.md new file mode 100644 index 00000000..30dbf3fa --- /dev/null +++ b/fr/integrations/langchain/chat-models/chat-fireworks.md @@ -0,0 +1,11 @@ +--- +description: Enveloppe autour des points de terminaison de Fireworks Chat. +--- + +# Chat Fireworks + +

Noeud de Chat Fireworks

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/chatanthropic.md b/fr/integrations/langchain/chat-models/chatanthropic.md new file mode 100644 index 00000000..19c91ccf --- /dev/null +++ b/fr/integrations/langchain/chat-models/chatanthropic.md @@ -0,0 +1,11 @@ +--- +description: Enveloppe autour des modèles de langage de ChatAnthropic qui utilisent le point de terminaison Chat. +--- + +# ChatAnthropic + +

Noeud ChatAnthropic

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/chatcohere.md b/fr/integrations/langchain/chat-models/chatcohere.md new file mode 100644 index 00000000..a1d9051c --- /dev/null +++ b/fr/integrations/langchain/chat-models/chatcohere.md @@ -0,0 +1,11 @@ +--- +description: Enveloppe autour des points de terminaison de Cohere Chat. +--- + +# ChatCohere + +

Node ChatCohere

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/chatcometapi.md b/fr/integrations/langchain/chat-models/chatcometapi.md new file mode 100644 index 00000000..b6a6ef21 --- /dev/null +++ b/fr/integrations/langchain/chat-models/chatcometapi.md @@ -0,0 +1,16 @@ +# ChatCometAPI + +## Description +CometAPI est une plateforme API unifiée qui donne accès à plus de 500 modèles d'IA, y compris GPT, Claude, Gemini, Qwen, DeepSeek, Midjourney, et d'autres, via une seule intégration. Elle offre un accès simplifié avec des formats API cohérents entre différents fournisseurs de modèles. + +## Prérequis +1. Consultez la [documentation](https://api.cometapi.com/doc) officielle de CometAPI. +2. Obtenez votre clé API depuis la [Console CometAPI](https://api.cometapi.com/console/token). + +## Guide étape par étape +

Noeud ChatCometAPI

+ +1. **Modèles de chat** > Faites glisser le noeud **ChatCometAPI**. +2. Créez un nouvel identifiant avec la clé API de CometAPI. +3. Cliquez sur **Paramètres supplémentaires** dans le noeud ChatCometAPI. +4. Changez le chemin de base en : `https://api.cometapi.com/v1/`. \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/chathuggingface.md b/fr/integrations/langchain/chat-models/chathuggingface.md new file mode 100644 index 00000000..074a6604 --- /dev/null +++ b/fr/integrations/langchain/chat-models/chathuggingface.md @@ -0,0 +1,11 @@ +--- +description: Wrapper autour des grands modèles de langage HuggingFace. +--- + +# ChatHuggingFace + +

Node ChatHuggingFace

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/chatlocalai.md b/fr/integrations/langchain/chat-models/chatlocalai.md new file mode 100644 index 00000000..3c76a55b --- /dev/null +++ b/fr/integrations/langchain/chat-models/chatlocalai.md @@ -0,0 +1,62 @@ +# ChatLocalAI + +## Configuration de LocalAI + +[**LocalAI** ](https://github.com/go-skynet/LocalAI) est une API REST de remplacement qui est compatible avec les spécifications de l'API OpenAI pour l'inférence locale. Elle vous permet d'exécuter des LLM (et pas seulement) localement ou sur site avec du matériel de consommation, prenant en charge plusieurs familles de modèles compatibles avec le format ggml. + +Pour utiliser ChatLocalAI dans Flowise, suivez les étapes ci-dessous : + +1. ```bash + git clone https://github.com/go-skynet/LocalAI ``` +2.
cd LocalAI
+   
+3. ```bash
+ # copy your models to models/
+ cp your-model.bin models/
+ ```
+
+Par exemple :
+
+Téléchargez l'un des modèles depuis [gpt4all.io](https://gpt4all.io/index.html)
+
+```bash
+# Download gpt4all-j to models/
+wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
+```
+Dans le dossier `/models`, vous devriez pouvoir voir le modèle téléchargé :
+
+<figure>
+ +Consultez [ici](https://localai.io/model-compatibility/index.html) pour la liste des modèles pris en charge. + +4. ```bash + docker compose up -d --pull always +``` ``` +5. Now API is accessible at localhost:8080 + +```bash +# Test API +curl http://localhost:8080/v1/models +# {"object":"list","data":[{"id":"ggml-gpt4all-j.bin","object":"model"}]} +``` + +## Configuration de Flowise + +Glissez-déposez un nouveau composant ChatLocalAI sur le canevas : + +
+ +Remplissez les champs : + +* **Chemin de base** : L'URL de base de LocalAI, comme [http://localhost:8080/v1](http://localhost:8080/v1) +* **Nom du modèle** : Le modèle que vous souhaitez utiliser. Notez qu'il doit se trouver dans le dossier `/models` du répertoire LocalAI. Par exemple : `ggml-gpt4all-j.bin` + +{% hint style="info" %} +Si vous exécutez à la fois Flowise et LocalAI sur Docker, vous devrez peut-être changer le chemin de base en [http://host.docker.internal:8080/v1](http://host.docker.internal:8080/v1). Pour les systèmes basés sur Linux, la passerelle Docker par défaut doit être utilisée car host.docker.internal n'est pas disponible : [http://172.17.0.1:8080/v1](http://172.17.0.1:8080/v1) +{% endhint %} + +C'est tout ! Pour plus d'informations, consultez la [documentation](https://localai.io/basics/getting_started/index.html) de LocalAI. + +Regardez comment vous pouvez utiliser LocalAI sur Flowise + +{% embed url="https://youtu.be/0B0oIs8NS9k" %} diff --git a/fr/integrations/langchain/chat-models/chatollama.md b/fr/integrations/langchain/chat-models/chatollama.md new file mode 100644 index 00000000..4c12a4f0 --- /dev/null +++ b/fr/integrations/langchain/chat-models/chatollama.md @@ -0,0 +1,38 @@ +# ChatOllama + +## Prérequis + +1. Téléchargez [Ollama](https://github.com/ollama/ollama) ou exécutez-le sur [Docker.](https://hub.docker.com/r/ollama/ollama) +2. Par exemple, vous pouvez utiliser la commande suivante pour lancer une instance Docker avec llama3 + + ```bash + docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama + docker exec -it ollama ollama run llama3 + ``` + +## Configuration + +1. **Modèles de chat** > faites glisser le nœud **ChatOllama** + +
+ +2. Remplissez le modèle qui fonctionne sur Ollama. Par exemple : `llama2`. Vous pouvez également utiliser des paramètres supplémentaires : + +
+ +3. Voilà [🎉](https://emojipedia.org/party-popper/), vous pouvez maintenant utiliser le **nœud ChatOllama** dans Flowise + +
+ +### Supplémentaire + +Si vous exécutez à la fois Flowise et Ollama sur Docker, vous devrez changer l'URL de base pour ChatOllama. + +Pour les systèmes d'exploitation Windows et MacOS, spécifiez [http://host.docker.internal:8000](http://host.docker.internal:8000/). Pour les systèmes basés sur Linux, la passerelle Docker par défaut doit être utilisée car host.docker.internal n'est pas disponible : [http://172.17.0.1:8000](http://172.17.0.1:8000/) + +
+ +## Ressources + +* [LangchainJS ChatOllama](https://js.langchain.com/docs/integrations/chat/ollama) +* [Ollama](https://github.com/ollama/ollama) \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/chattogetherai.md b/fr/integrations/langchain/chat-models/chattogetherai.md new file mode 100644 index 00000000..36265af7 --- /dev/null +++ b/fr/integrations/langchain/chat-models/chattogetherai.md @@ -0,0 +1,11 @@ +--- +description: Enveloppe autour des modèles de langage de TogetherAI +--- + +# ChatTogetherAI + +

Node ChatTogetherAI

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/google-ai.md b/fr/integrations/langchain/chat-models/google-ai.md new file mode 100644 index 00000000..ceb1cf9c --- /dev/null +++ b/fr/integrations/langchain/chat-models/google-ai.md @@ -0,0 +1,42 @@ +# ChatGoogleGenerativeAI + +## Prérequis + +1. Inscrivez-vous pour un compte [Google](https://accounts.google.com/InteractiveLogin) +2. Créez une [clé API](https://aistudio.google.com/app/apikey) + +## Configuration + +1. **Modèles de Chat** > faites glisser le nœud **ChatGoogleGenerativeAI** + +
+ +2. **Connecter les Identifiants** > cliquez sur **Créer Nouveau** + +
+ +3. Remplissez les identifiants **Google AI** + +
+ +4. Voilà [🎉](https://emojipedia.org/party-popper/), vous pouvez maintenant utiliser le nœud **ChatGoogleGenerativeAI** dans Flowise + +
+ +## Configuration des Attributs de Sécurité + +1. Cliquez sur **Paramètres Supplémentaires** + +
+ +* Lors de la configuration des **Attributs de Sécurité**, le nombre de sélections dans **Catégorie de Dommages** et **Seuil de Blocage de Dommages** doit être le même. Sinon, une erreur sera générée : `La Catégorie de Dommages et le Seuil de Blocage de Dommages ne sont pas de la même longueur` + +* La combinaison des **Attributs de Sécurité** ci-dessous entraînera que `Dangereux` est défini sur `Faible et Au-dessus` et `Harcèlement` est défini sur `Moyen et Au-dessus` + +
+ +## Ressources + +* [LangChain JS ChatGoogleGenerativeAI](https://js.langchain.com/docs/integrations/chat/google_generativeai) +* [Google AI pour les Développeurs](https://ai.google.dev/) +* [Documentation de l'API Gemini](https://ai.google.dev/docs) \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/google-vertexai.md b/fr/integrations/langchain/chat-models/google-vertexai.md new file mode 100644 index 00000000..c32b2c3a --- /dev/null +++ b/fr/integrations/langchain/chat-models/google-vertexai.md @@ -0,0 +1,86 @@ +# Google VertexAI + +## Prérequis + +1. [Commencez votre GCP](https://cloud.google.com/docs/get-started) +2. Installez le [Google Cloud CLI](https://cloud.google.com/sdk/docs/install-sdk) + +## Configuration + +### Activer l'API Vertex AI + +1. Allez sur Vertex AI dans GCP et cliquez sur **"ACTIVER TOUTES LES API RECOMMANDÉES"** + +
+ +## Créer un fichier d'identification _(Optionnel)_ + +Il existe 2 façons de créer un fichier d'identification + +### No. 1 : Utiliser le GCP CLI + +1. Ouvrez le terminal et exécutez la commande suivante + +```bash +gcloud auth application-default login +``` +```markdown +2. Connectez-vous à votre compte GCP +3. Vérifiez votre fichier d'identification. Vous pouvez trouver votre fichier d'identification dans `~/.config/gcloud/application_default_credentials.json` + +### No. 2 : Utiliser la console GCP + +1. Allez sur la console GCP et cliquez sur **"CRÉER DES IDENTIFIANTS"** + +
+ +2. Créez un compte de service + +
+ +3. Remplissez le formulaire des détails du compte de service et cliquez sur **"CRÉER ET CONTINUER"** +4. Sélectionnez le rôle approprié (par exemple, Utilisateur Vertex AI) et cliquez sur **"FAIRE"** + +
+ +5. Cliquez sur le compte de service que vous avez créé et cliquez sur **"AJOUTER UNE CLÉ" -> "Créer une nouvelle clé"** + +
+ +6. Sélectionnez JSON et cliquez sur **"CRÉER"**, puis vous pourrez télécharger votre fichier d'identification + +
+ +## Flowise + +
+ +### Sans fichier d'identification + +Si vous utilisez un service GCP comme Cloud Run, ou si vous avez installé des identifiants par défaut sur votre machine locale, vous n'avez pas besoin de définir cet identifiant. + +### Avec fichier d'identification + +1. Allez sur la page des identifiants sur Flowise et cliquez sur **"Ajouter un identifiant"** +2. Cliquez sur Google Vertex Auth + +
+ +3. Enregistrez votre fichier d'identification. Il y a 2 façons d'enregistrer votre fichier d'identification. + +
+ +* **Option 1 : Entrez le chemin de votre fichier d'identification** + * Si vous avez un fichier d'identification sur votre machine, vous pouvez entrer le chemin de votre fichier d'identification dans `Chemin du fichier d'identification Google Application` +* **Option 2 : Collez le texte de votre fichier d'identification** + * Ou vous pouvez copier tout le texte du fichier d'identification et le coller dans `Objet JSON d'identification Google` + +4. Enfin, cliquez sur le bouton "Ajouter". +5. **🎉**Vous pouvez maintenant utiliser ChatGoogleVertexAI avec l'identifiant dans Flowise ! + +### Ressources + +* [LangChain JS GoogleVertexAI](https://js.langchain.com/docs/api/llms_googlevertexai/classes/GoogleVertexAI) +* [Aperçu des comptes de service Google](https://cloud.google.com/iam/docs/service-account-overview?) +* [Essayez Google Vertex AI Palm 2 avec Flowise : Sans coder pour tirer parti de l'intuition](https://tech.beatrust.com/entry/2023/08/22/Try_Google_Vertex_AI_Palm_2_with_Flowise%3A_Without_Coding_to_Leverage_Intuition) +``` \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/groqchat.md b/fr/integrations/langchain/chat-models/groqchat.md new file mode 100644 index 00000000..3298a161 --- /dev/null +++ b/fr/integrations/langchain/chat-models/groqchat.md @@ -0,0 +1,11 @@ +--- +description: Enveloppe autour de l'API Groq avec le moteur d'inférence LPU. +--- + +# GroqChat + +

Node GroqChat

+ +{% hint style="info" %} +Cette section est en cours de développement. Nous apprécions toute aide que vous pouvez apporter pour compléter cette section. Veuillez consulter notre [Guide de Contribution](broken-reference) pour commencer. +{% endhint %} \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/ibm-watsonx.md b/fr/integrations/langchain/chat-models/ibm-watsonx.md new file mode 100644 index 00000000..04d8b3b4 --- /dev/null +++ b/fr/integrations/langchain/chat-models/ibm-watsonx.md @@ -0,0 +1,49 @@ +# IBM Watsonx + +## Prérequis + +1. Inscrivez-vous sur [IBM Watsonx](https://www.ibm.com/watsonx) +2. Créez un nouveau projet : + +
+ +
+ +3. Une fois le projet créé, retournez au tableau de bord principal et cliquez sur **Explorer les modèles de base** : + +
+ +4. Choisissez le modèle que vous souhaitez utiliser et ouvrez-le dans Prompt Lab : + +
+ +5. Dans le coin supérieur droit, cliquez sur Afficher le code : + +
+ +6. Notez le paramètre `model_id` et `version`. Dans ce cas, c'est `ibm/granite-3-8b-instruct,` et la version est `2023-05-29`. +7. Cliquez sur la barre de navigation à gauche, puis cliquez sur Accès développeur + +
+ +8. Notez l'URL `watsonx.ai`, l'ID du projet et créez une nouvelle clé API depuis le tableau de bord IBM Cloud. +9. À ce stade, vous devriez avoir les informations suivantes : + * URL Watsonx.ai + * ID du projet + * Clé API + * Version du modèle + * ID du modèle + +## Configuration + +1. **Modèles de chat** > faites glisser le nœud **ChatIBMWatsonx** + +
+ +2. Remplissez le modèle avec l'ID du modèle précédemment. Créez de nouvelles informations d'identification et remplissez tous les détails. + +
+ +2. Voilà [🎉](https://emojipedia.org/party-popper/), vous pouvez maintenant utiliser le **nœud ChatIBMWatsonx** dans Flowise ! + +
\ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/mistral-ai.md b/fr/integrations/langchain/chat-models/mistral-ai.md new file mode 100644 index 00000000..d83a4049 --- /dev/null +++ b/fr/integrations/langchain/chat-models/mistral-ai.md @@ -0,0 +1,30 @@ +# ChatMistralAI + +## Prérequis + +1. Inscrivez-vous pour un compte [Mistral AI](https://mistral.ai/) +2. Créez une [clé API](https://console.mistral.ai/user/api-keys/) + +## Configuration + +1. **Modèles de Chat** > faites glisser le nœud **ChatMistralAI** + +
+ +2. **Connecter les Identifiants** > cliquez sur **Créer Nouveau** + +
+ +3. Remplissez les identifiants **Mistral AI** + +
+ +4. Voilà [🎉](https://emojipedia.org/party-popper/), vous pouvez maintenant utiliser le **nœud ChatMistralAI** dans Flowise + +
+ +## Ressources + +* [LangChain JS ChatMistralAI](https://js.langchain.com/docs/integrations/chat/mistral) +* [Mistral AI](https://mistral.ai/) +* [Documentation Mistral AI](https://docs.mistral.ai/) \ No newline at end of file diff --git a/fr/integrations/langchain/chat-models/nvidia-nim.md b/fr/integrations/langchain/chat-models/nvidia-nim.md new file mode 100644 index 00000000..4ea04edb --- /dev/null +++ b/fr/integrations/langchain/chat-models/nvidia-nim.md @@ -0,0 +1,93 @@ +# NVIDIA NIM + +## Local + +### Remarque importante sur l'exécution de NIM avec Flowise + +Si une instance NIM existante est déjà en cours d'exécution (par exemple, via ChatRTX de NVIDIA), démarrer une autre instance via Flowise **sans vérifier un point de terminaison existant** peut provoquer des conflits. Ce problème se produit lorsque plusieurs commandes `podman run` sont exécutées sur le même NIM, entraînant des échecs. + +Pour obtenir de l'aide, consultez : + +- **[Forums des développeurs NVIDIA](https://forums.developer.nvidia.com/)** – Pour les problèmes techniques et les questions. +- **[Discord des développeurs NVIDIA](https://discord.gg/nvidiadeveloper)** – Pour l'engagement communautaire et les [annonces](https://discord.com/channels/1019361803752456192/1340013505834647572). + +### Prérequis + +1. Configurez [NVIDIA NIM localement avec WSL2](https://docs.nvidia.com/nim/wsl2/1.0.0/getting-started.html). + +### Flowise + +1. **Modèles de chat** > Faites glisser le nœud **Chat NVIDIA NIM** > Cliquez sur **Configurer NIM localement**. + +
+ +2. Si NIM est déjà installé, cliquez sur **Suivant**. Sinon, cliquez sur **Télécharger** pour démarrer l'installateur. + +
+ +3. Sélectionnez une image de modèle à télécharger. + +
+ +4. Une fois sélectionnée, cliquez sur **Suivant** pour continuer le téléchargement. + +
+ +5. **Téléchargement de l'image** – La durée dépend de la vitesse de connexion Internet. + +
+ +6. En savoir plus sur [Relaxer les contraintes de mémoire](https://docs.nvidia.com/nim/large-language-models/1.7.0/configuration.html#environment-variables). + Le **port hôte** est le port que le conteneur doit mapper à la machine locale. + +
+ +7. **Démarrage du conteneur...** + +
+ +_Remarque : Si vous avez déjà un conteneur en cours d'exécution avec le modèle sélectionné, Flowise vous demandera si vous souhaitez réutiliser le conteneur en cours d'exécution. Vous pouvez choisir de réutiliser le conteneur en cours d'exécution ou d'en démarrer un nouveau avec un port différent._ + +
+ +8. **Enregistrer le flux de discussion** + +9. [🎉](https://emojipedia.org/party-popper/) **Voilà !** Votre nœud **Chat NVIDIA NIM** est maintenant prêt à être utilisé dans Flowise ! + +
+ +## Cloud + +### Prérequis + +1. Connectez-vous ou inscrivez-vous sur [NVIDIA](https://build.nvidia.com/). +2. Dans la barre de navigation en haut, cliquez sur NIM : + +
+ +3. Recherchez le modèle que vous souhaitez utiliser. Pour le télécharger localement, nous allons utiliser Docker : + +
+ +4. Suivez les instructions de configuration de Docker. Vous devez d'abord obtenir une clé API pour tirer l'image Docker : + +
+ +### Flowise + +1. **Modèles de chat** > faites glisser le nœud **Chat NVIDIA NIM** + +
+ +2. Si vous utilisez un point de terminaison hébergé par NVIDIA, vous devez avoir votre clé API. **Connecter les identifiants** > cliquez sur **Créer nouveau.** Cependant, si vous utilisez une configuration locale, cela est optionnel. + +
+ +3. Entrez le nom du modèle et voilà [🎉](https://emojipedia.org/party-popper/), votre **nœud Chat NVIDIA NIM** est maintenant prêt à être utilisé dans Flowise ! + +
+ +### Ressources + +- [NVIDIA LLM Guide de démarrage](https://docs.nvidia.com/nim/large-language-models/latest/getting-started.html) +- [NVIDIA NIM](https://build.nvidia.com/microsoft/phi-3-mini-4k?snippet_tab=Docker) \ No newline at end of file diff --git a/fr/integrations/langchain/document-loaders/README.md b/fr/integrations/langchain/document-loaders/README.md new file mode 100644 index 00000000..061ae7cc --- /dev/null +++ b/fr/integrations/langchain/document-loaders/README.md @@ -0,0 +1,53 @@ +--- +description: LangChain Document Loader Nodes +--- + +# Chargeurs de documents + +*** + +Les chargeurs de documents vous permettent de charger des documents provenant de différentes sources comme PDF, TXT, CSV, notion, confluence, etc. Ils sont souvent utilisés avec[Vector Stores](../vector-stores/)à être mis en baisse comme des intérêts, qui peuvent ensuite récupérer sur la requête. + +### Regardez une introduction sur les chargeurs de documents + +{% embed url = "https://youtu.be/kmtf9snicao"%} + +### Nœuds de chargeur de documents: + +* [Airtable](airtable.md) +* [API Loader](api-loader.md) +* [Apify Website Content Crawler](apify-website-content-crawler.md) +* [BraveSearch Loader](bravesearch-api.md) +* [Cheerio Web Scraper](cheerio-web-scraper.md) +* [Confluence](confluence.md) +* [Csv File](csv-file.md) +* [Custom Document Loader](custom-document-loader.md) +* [Document Store](document-store.md) +* [Docx File](docx-file.md) +* [Epub File](epub-file.md) +* [Figma](figma.md) +* [File](file-loader.md) +* [FireCrawl](firecrawl.md) +* [Folder](folder.md) +* [GitBook](gitbook.md) +* [Github](github.md) +* [Google Drive](google-drive.md) +* [Google Sheets](google-sheets.md) +* [Jira](jira.md) +* [Json File](json-file.md) +* [Json Lines File](jsonlines.md) +* [Microsoft Excel](microsoft-excel.md) +* [Microsoft Powerpoint](microsoft-powerpoint.md) +* [Microsoft Word](microsoft-word.md) +* [Notion](notion.md) +* [PDF Files](pdf-file.md) +* [Plain Text](plain-text.md) +* 
[Playwright Web Scraper](playwright-web-scraper.md) +* [Puppeteer Web Scraper](puppeteer-web-scraper.md) +* [S3 File Loader](s3-file-loader.md) +* [SearchApi For Web Search](searchapi-for-web-search.md) +* [SerpApi For Web Search](serpapi-for-web-search.md) +* [Spider - web search & crawler](spider-web-scraper-crawler.md) +* [Text File](text-file.md) +* [Unstructured File Loader](unstructured-file-loader.md) +* [Unstructured Folder Loader](unstructured-folder-loader.md) diff --git a/fr/integrations/langchain/document-loaders/airtable.md b/fr/integrations/langchain/document-loaders/airtable.md new file mode 100644 index 00000000..211a24f8 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/airtable.md @@ -0,0 +1,66 @@ +--- +description: Load data from Airtable table. +--- + +# Chargeur de documents Airtable + +

nœud AirTable

+
+AirTable est un service de collaboration cloud qui combine les fonctionnalités d'une feuille de calcul avec une base de données. Ce module fournit des fonctionnalités complètes pour charger et traiter les données des tables Airtable.
+
+Ce module fournit un chargeur de document Airtable sophistiqué qui peut:
+- Charger des données à partir de bases, de tables et de vues Airtable spécifiques
+- Filtrer et sélectionner des champs spécifiques
+- Gérer la pagination et les grands ensembles de données
+- Prise en charge du filtrage personnalisé avec des formules
+- Traiter les données avec des séparateurs de texte
+- Personnaliser l'extraction des métadonnées
+
+## Entrées
+
+### Paramètres requis
+- **ID de base**: l'identifiant de base Airtable (par exemple, app11RobdGoX0YNsC)
+- **ID de table**: l'identifiant de table spécifique (par exemple, tblJdmvbrgizbYICO)
+- **Connectez les informations d'identification**: informations d'identification de l'API Airtable
+
+### Paramètres facultatifs
+- **ID de vue**: identifiant de vue spécifique (par exemple, viw9UrP77Id0CE4ee)
+- **Splitter du texte**: un séparateur de texte pour traiter le contenu extrait
+- **Inclure uniquement les champs**: liste de noms ou identifiants séparés par des virgules à inclure
+- **Renvoie tout**: s'il faut renvoyer tous les résultats (par défaut: true)
+- **Limit**: nombre de résultats à retourner lorsque « Renvoie tout » est faux (par défaut: 100)
+- **Filtre par formule**: formule Airtable pour filtrer les enregistrements
+- **Métadonnées supplémentaires**: objet JSON avec métadonnées supplémentaires
+- **Omettre les clés de métadonnées**: liste des clés de métadonnées séparées par des virgules pour omettre
+
+## Sorties
+
+- **Document**: tableau d'objets de document contenant des métadonnées et un pageContent
+- **Texte**: chaîne concaténée du pageContent des documents
+
+## Caractéristiques
+- Récupération des données basée sur l'API
+- Sélection et filtrage des champs
+- Support de pagination 
+- Filtrage basé sur la formule
+- Manipulation des métadonnées personnalisables
+- Capacités de division de texte
+- Gestion des erreurs pour les entrées non valides
+
+## Notes
+- Nécessite des informations d'identification API Airtable valides
+- L'ID de base et l'ID de table sont obligatoires
+- Les noms de champ contenant des virgules doivent utiliser des ID de champ à la place
+- Les formules de filtre doivent suivre la syntaxe de formule Airtable
+- La limitation des taux et les quotas API s'appliquent
+- Prend en charge la récupération de données complète et partielle
+
+## Exemple de structure d'URL
+Pour une URL de table comme:
+```
+https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee
+```
+- ID de base: app11RobdGoX0YNsC
+- ID de table: tblJdmvbrgizbYICO
+- ID de vue: viw9UrP77Id0CE4ee
+
diff --git a/fr/integrations/langchain/document-loaders/api-loader.md b/fr/integrations/langchain/document-loaders/api-loader.md
new file mode 100644
index 00000000..3832fe0b
--- /dev/null
+++ b/fr/integrations/langchain/document-loaders/api-loader.md
@@ -0,0 +1,82 @@
+---
+description: Charger des données depuis une API.
+---
+
+# Chargeur de documents API
+
+<figure>

API Loder Node

+
+Le chargeur de document API fournit des fonctionnalités pour charger et traiter les données des API externes à l'aide de requêtes HTTP. Ce module permet une intégration transparente avec des API et services Web RESTful.
+
+Ce module fournit un chargeur de document API polyvalent qui peut:
+- Effectuer des requêtes HTTP GET et POST
+- Gérer les en-têtes personnalisés et les corps de requête
+- Traiter les réponses de l'API dans les documents
+- Soutenir les structures de données JSON
+- Personnaliser l'extraction des métadonnées
+- Traiter les réponses avec des séparateurs de texte
+
+## Entrées
+
+### Paramètres requis
+- **URL**: L'URL de point de terminaison de l'API à appeler
+- **Méthode**: Méthode HTTP à utiliser (GET ou POST)
+
+### Paramètres facultatifs
+- **En-têtes**: objet JSON contenant des en-têtes HTTP
+- **Corps**: objet JSON pour le corps de la requête POST
+- **Splitter du texte**: un séparateur de texte pour traiter le contenu extrait
+- **Métadonnées supplémentaires**: objet JSON avec métadonnées supplémentaires
+- **Omettre les clés de métadonnées**: liste des clés de métadonnées séparées par des virgules pour omettre
+
+## Sorties
+
+- **Document**: tableau d'objets de document contenant des métadonnées et un pageContent
+- **Texte**: chaîne concaténée du pageContent des documents
+
+## Caractéristiques
+- Prise en charge de la méthode HTTP (GET / POST)
+- Configuration de l'en-tête personnalisée
+- Personnalisation du corps de la requête
+- Traitement des réponses
+- Gestion des erreurs
+- Personnalisation des métadonnées
+- Capacités de division de texte
+
+## Exemple d'utilisation
+
+### Requête GET
+```json
+{
+  "method": "GET",
+  "url": "https://api.example.com/data",
+  "headers": {
+    "Authorization": "Bearer token123",
+    "Accept": "application/json"
+  }
+}
+```
+
+### Requête POST
+```json
+{
+  "method": "POST",
+  "url": "https://api.example.com/data",
+  "headers": {
+    "Content-Type": 
"application/json", + "Authorization": "Bearer token123" + }, + "body": { + "query": "example", + "limit": 10 + } +} +``` + +## Notes +- Prend en charge les formats de demande / réponse JSON +- Gère les réponses d'erreur HTTP +- Traite automatiquement les données de réponse dans les documents +- Peut être combiné avec des séparateurs de texte pour le traitement du contenu +- Prend en charge l'ajout et l'omission des métadonnées personnalisées +- Les réponses d'erreur sont correctement gérées et signalées diff --git a/fr/integrations/langchain/document-loaders/apify-website-content-crawler.md b/fr/integrations/langchain/document-loaders/apify-website-content-crawler.md new file mode 100644 index 00000000..c5ea741e --- /dev/null +++ b/fr/integrations/langchain/document-loaders/apify-website-content-crawler.md @@ -0,0 +1,106 @@ +--- +description: Load data from Apify Website Content Crawler. +--- + +# Crawler de contenu du site Web Apify + +
+ +### Gérer les liens (facultatif) + +1. Entrée URL souhaitée à ramper. +2. Cliquez sur ** Répondre aux liens ** Pour récupérer les liens en fonction des entrées de la méthode ** Get Relative Links ** et ** Obtenir des liens relatifs Limite ** Dans ** Paramètres supplémentaires **. +3. Dans ** Liens rampés ** Section, supprimez les liens indésirables en cliquant sur ** Icône de bac à ordures rouges **. +4. Enfin, cliquez sur ** Enregistrer **. + +
+ +## Sortir + +Charge le contenu de l'URL en tant que document + +## Ressources + +* [LangChain JS Cheerio](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/web_cheerio) +* [Cheerio](https://cheerio.js.org/) diff --git a/fr/integrations/langchain/document-loaders/confluence.md b/fr/integrations/langchain/document-loaders/confluence.md new file mode 100644 index 00000000..ca04758b --- /dev/null +++ b/fr/integrations/langchain/document-loaders/confluence.md @@ -0,0 +1,86 @@ +--- +description: Load data from a Confluence Document +--- + +# Confluence + +## Confluence + +

+ +## Chargeur de documents Confluence + +Confluence est le wiki et la plate-forme de collaboration d'Atlassian. Ce module fournit des fonctionnalités pour charger et traiter le contenu à partir des espaces et des pages de confluence. + +Ce module fournit un chargeur de document à confluence sophistiqué qui peut: + +* Chargez le contenu à partir d'espaces de confluence spécifiques +* Prise en charge des déploiements Cloud et Server / Data Center +* Gérer l'authentification avec plusieurs méthodes +* Limiter le nombre de pages récupérées +* Traiter le contenu avec des séparateurs de texte +* Personnaliser l'extraction des métadonnées + +### Entrées + +#### Paramètres requis + +* ** URL de base **: L'URL de l'instance de confluence (par exemple, https://example.atlassian.net/wiki) +* ** Espace Key **: L'identifiant unique de l'espace de confluence +* ** Connectez les informations d'identification **: Choisissez entre: + * Conditions d'identification de l'API Cloud Confluence (nom d'utilisateur + jeton d'accès) + * Informations sur l'api du serveur Confluence / DC (Token d'accès personnel) + +#### Paramètres facultatifs + +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Limite **: Nombre maximum de pages à récupérer (0 pour illimité) +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +### Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +### Caractéristiques + +* Prise en charge multi-déploiement (cloud / serveur / dc) +* Options d'authentification flexibles +* Contrôles de limite de page +* Capacités de traitement du contenu +* Personnalisation des métadonnées +* Gestion des erreurs +* Support de division de texte + +### Méthodes d'authentification + +#### Nuage de confluence 
+ +* Nom d'utilisateur et jeton d'accès +* Token d'accès généré à partir des paramètres du compte atlassian +* Prend en charge l'authentification des jetons API + +#### Serveur de confluence / centre de données + +* Utilise un jeton d'accès personnel +* Jeton généré à partir de l'instance de confluence +* Prend en charge l'accès direct au serveur + +### Notes + +* La clé d'espace peut être trouvée dans les réglages d'espace Confluence +* Différentes méthodes d'authentification pour le cloud vs serveur +* La limitation des taux peut s'appliquer en fonction de l'instance +* Le contenu comprend le texte et les métadonnées de la page +* Prend en charge la récupération de contenu à la fois complète et partielle +* Gestion des erreurs pour les informations d'identification ou URL non valides + +### Trouver une clé d'espace + +Pour trouver votre clé d'espace de confluence: + +1. Accédez à l'espace en confluence +2. Aller dans les paramètres de l'espace +3. Recherchez "Key Space" dans l'aperçu +4. Exemple de format: \ ~ Exemple362906de5d343d49dcdbae5Dexample diff --git a/fr/integrations/langchain/document-loaders/csv-file.md b/fr/integrations/langchain/document-loaders/csv-file.md new file mode 100644 index 00000000..fc0470e2 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/csv-file.md @@ -0,0 +1,37 @@ +--- +description: Load data from CSV files. +--- + +# Fichiers CSV + +

CSV Node de fichier

+ +CSV (valeurs séparées par les virgules) est un format de fichier simple utilisé pour stocker des données tabulaires, telles qu'une feuille de calcul ou une base de données. Ce module fournit des fonctionnalités pour charger et traiter les fichiers CSV dans votre flux de travail. + +Ce module fournit un chargeur de document CSV polyvalent qui peut: +- Chargez des fichiers CSV uniques ou multiples +- Prise en charge des fichiers et fichiers codés en base de base64 à partir du stockage +- Extraire des colonnes spécifiques ou du contenu entier +- Traiter efficacement de grands ensembles de données +- Gérer la gestion des métadonnées personnalisées + +## Entrées + +- ** Fichier CSV **: le (s) fichier (s) CSV (S) à traiter (extension .csv requise) +- ** Splitter de texte ** (facultatif): un séparateur de texte pour traiter le contenu extrait +- ** Extraction à colonne unique ** (facultatif): nom d'une colonne spécifique à extraire +- ** Métadonnées supplémentaires ** (Facultatif): objet JSON avec des métadonnées supplémentaires à ajouter aux documents +- ** omettre les clés de métadonnées ** (facultatif): liste de clés de métadonnées séparées par des virgules pour omettre à partir des métadonnées par défaut + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de tous les documents + +## Caractéristiques +- Support de traitement de fichiers multiples +- Capacité d'extraction à colonne unique +- Gestion efficace des grands ensembles de données +- Manipulation des métadonnées personnalisables +- Prise en charge de l'intégration du stockage +- Capacités de manutention de base64 et de blob diff --git a/fr/integrations/langchain/document-loaders/custom-document-loader.md b/fr/integrations/langchain/document-loaders/custom-document-loader.md new file mode 100644 index 00000000..5d018013 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/custom-document-loader.md 
@@ -0,0 +1,91 @@ +--- +description: Custom function for loading documents. +--- + +# Chargeur de documents personnalisé + +

Nœud de chargeur de documents personnalisé

+ +Le chargeur de documents personnalisé offre la possibilité de créer des fonctionnalités de chargement de documents personnalisées à l'aide de JavaScript. Ce module permet le traitement de documents flexible et personnalisé via des fonctions définies par l'utilisateur. + +Ce module fournit un chargeur de documents flexible qui peut: +- Exécuter des fonctions JavaScript personnalisées pour le chargement des documents +- Gérer dynamiquement les variables d'entrée +- Prise en charge des sorties de document et de texte +- Exécuter dans un environnement en sable +- Contexte de flux d'accès et variables +- Traiter les métadonnées personnalisées + +## Entrées + +### Paramètres requis +- ** Fonction JavaScript **: code personnalisé qui revient soit: + - Tableau d'objets de document (pour la sortie du document) + - String (pour la sortie du texte) + +### Paramètres facultatifs +- ** Variables d'entrée **: Variables JSON contenant des variables accessibles dans la fonction avec $ Prefix + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Environnement d'exécution de boîte à sable +- Support d'injection variable +- Accès au contexte de flux +- Support de dépendance personnalisé +- Gestion des erreurs +- Protection contre le délai +- Validation d'entrée + +## Structure de document +Lors du retour des documents, chaque objet doit avoir: +```javascript +{ + pageContent: 'Document Content', + metadata: { + title: 'Document Title', + // ... 
other metadata + } +} +``` + +## Exemple d'utilisation + +### Documer la sortie +```javascript +return [ + { + pageContent: 'Document Content', + metadata: { + title: 'Document Title', + source: 'Custom Source' + } + } +] +``` + +### Sortie de texte +```javascript +return "Processed text content" +``` + +## Contexte disponible +- ** $ entrée **: valeur d'entrée transmise à la fonction +- ** $ vars **: Accès aux variables de flux +- ** $ Flow **: Flow Context Object Contenant: + - chatflowid + - de session + - chatitide + - saisir + +## Notes +- Fonctions exécutées dans un bac à sable sécurisé +- Tempsion d'exécution de 10 secondes +- Dépendances intégrées disponibles +- Dépendances externes configurables +- Les variables d'entrée doivent être valides JSON +- Gestion des erreurs pour les rendements non valides +- Prend en charge les opérations asynchrones diff --git a/fr/integrations/langchain/document-loaders/document-store.md b/fr/integrations/langchain/document-loaders/document-store.md new file mode 100644 index 00000000..4d5cf4a2 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/document-store.md @@ -0,0 +1,133 @@ +--- +description: Load data from pre-configured document stores. +--- + +# Magasin de documents + +
+ +Le chargeur de magasin de documents vous permet de charger des données des magasins de documents préconfigurés dans votre base de données. Ce chargeur fournit un moyen pratique d'accéder et d'utiliser des documents précédemment traités et stockés dans vos workflows. + +## Caractéristiques + +* Charge des documents des magasins synchronisés +* Gestion automatique des métadonnées +* Formats de sortie multiples +* Sélection de magasins asynchrones +* Intégration de la base de données +* Récupération de documents basée sur des morceaux +* Support de métadonnées JSON + +## Comment ça marche + +1. ** Sélection du magasin **: + * Répertorie tous les magasins disponibles disponibles en statut «synchronisé» + * Fournit des informations sur le magasin, y compris le nom et la description + * Permet la sélection des magasins synchronisés uniquement +2. ** Retriel de document **: + * Rechet des morceaux de document de la boutique sélectionnée + * Reconstruit les documents avec des métadonnées originales + * Maintient la structure et les relations du document + +## Paramètres + +### Paramètres requis + +* ** Sélectionnez Store **: Choisissez parmi les magasins de documents synchronisés disponibles + * Affiche le nom et la description du magasin + * Affiche uniquement les magasins en statut «synchronisé» + * Mis à jour dynamiquement en fonction du contenu de la base de données + +## Sorties + +Le chargeur fournit deux formats de sortie: + +### Documer la sortie + +Renvoie un tableau d'objets de document, chacun contenant: + +* ** PageContent **: le contenu réel du morceau de document +* ** Metadata **: Métadonnées du document original au format JSON + +### Sortie de texte + +Renvoie une chaîne concaténée contenant: + +* Tous les morceaux de document +* Séparés par Newlines +* Caractères correctement échappés + +## Intégration de la base de données + +Le chargeur s'intègre à votre base de données: + +* Connexion de source de données Typeorm +* Gestion des entités du magasin de 
documents +* Stockage et récupération basés sur des morceaux +* Conservation des métadonnées + +## Structure de document + +Chaque document chargé contient: + +```typescript +{ + pageContent: string, // The actual content + metadata: { // Parsed JSON metadata + // Original document metadata + // Store-specific information + // Custom metadata fields + } +} +``` + +## Exemples d'utilisation + +### Sélection de magasin de base + +```json +{ + "selectedStore": "store-id-123" +} +``` + +### Accéder au contenu du document + +```typescript +// Document output format +[ + { + "pageContent": "Document content here...", + "metadata": { + "source": "original-file.pdf", + "page": 1, + "category": "reports" + } + } +] + +// Text output format +"Document content here...\nNext document content here...\n" +``` + +## Meilleures pratiques + +1. Assurez-vous que les magasins sont synchronisés avant l'accès +2. Choisissez le format de sortie approprié pour votre cas d'utilisation +3. Gérer les métadonnées de manière appropriée dans votre flux de travail +4. Considérez la taille du morceau lors du traitement de grands documents +5. Surveiller les performances de la base de données avec les grands magasins + +## Notes + +* Seuls les magasins synchronisés sont disponibles pour la sélection +* Les métadonnées sont automatiquement analysées de JSON +* Les documents sont reconstruits à partir de morceaux +* Prend en charge les formats de sortie de document et de texte +* S'intègre à Typeorm pour l'accès à la base de données +* Gère les caractères d'échappement dans la sortie du texte +* Maintient la structure du document original + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference/)Pour commencer. 
+{% EndHint%} diff --git a/fr/integrations/langchain/document-loaders/docx-file.md b/fr/integrations/langchain/document-loaders/docx-file.md new file mode 100644 index 00000000..37bb052c --- /dev/null +++ b/fr/integrations/langchain/document-loaders/docx-file.md @@ -0,0 +1,37 @@ +--- +description: Load data from DOCX files. +--- + +# Fichier docx + +

docx file node

+ +Microsoft Word Document (DOCX) est un format de document largement utilisé pour la création et l'édition de documents texte. Ce module fournit des fonctionnalités pour charger et traiter les fichiers docx dans votre workflow. + +Ce module fournit un chargeur de document DOCX complet qui peut: + +* Chargez des fichiers Docx uniques ou multiples +* Prise en charge des fichiers et fichiers codés en base de base64 à partir du stockage +* Extraire le contenu du texte avec des métadonnées +* Intégrer avec des séparateurs de texte pour le traitement du contenu +* Gérer la gestion des métadonnées personnalisées + +## Entrées + +* ** Fichier Docx **: le (s) fichier (s) DOCX à traiter (extension .docx requise) +* ** Splitter de texte ** (facultatif): un séparateur de texte pour traiter le contenu extrait +* ** Métadonnées supplémentaires ** (Facultatif): objet JSON avec des métadonnées supplémentaires à ajouter aux documents +* ** omettre les clés de métadonnées ** (facultatif): liste de clés de métadonnées séparées par des virgules pour omettre à partir des métadonnées par défaut + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de tous les documents + +## Caractéristiques + +* Support de traitement de fichiers multiples +* Options de division de texte flexible +* Manipulation des métadonnées personnalisables +* Prise en charge de l'intégration du stockage +* Capacités de manutention de base64 et de blob diff --git a/fr/integrations/langchain/document-loaders/epub-file.md b/fr/integrations/langchain/document-loaders/epub-file.md new file mode 100644 index 00000000..a6301303 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/epub-file.md @@ -0,0 +1,62 @@ +# Chargeur de fichiers EPUB + +Epub (publication électronique) est une norme de livre électronique gratuite et ouverte par le Forum international de publication numérique (IDPF). 
Ce module fournit des fonctionnalités pour charger et traiter les fichiers EPUB dans votre flux de travail. + +Ce module fournit un chargeur de documents EPUB sophistiqué qui peut: +- Chargez des fichiers EPUB simples ou multiples +- Prise en charge des fichiers et fichiers codés en base de base64 à partir du stockage +- Extraire du contenu par chapitre ou par fichier +- Traiter le contenu avec des séparateurs de texte +- Gérer l'extraction des métadonnées +- Gérer le traitement de fichiers temporaire + +## Entrées + +### Paramètres requis +- ** Fichier EPUB **: le (s) fichier (s) pour traiter (extension .pub requis) +- ** Utilisation **: Choisissez entre: + - Un document par chapitre: Contenu divisé par les chapitres + - Un document par fichier: traiter le fichier entier comme un seul document + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Traitement de fichiers multiples +- Division au niveau du chapitre +- Traitement au niveau des fichiers +- Intégration de stockage +- Personnalisation des métadonnées +- Support de division de texte +- Traitement des fichiers temporaires +- Gestion des erreurs + +## Modes de traitement + +### Par mode chapitre +- Crée des documents distincts pour chaque chapitre +- Maintient la structure du chapitre +- Conserve les métadonnées du chapitre +- Mieux pour l'analyse détaillée + +### Par mode de fichier +- Traite le fichier entier comme un seul document +- Maintient la structure globale +- Organisation de documents plus simples +- Mieux pour l'analyse de la vue d'ensemble + +## Notes +- Prend 
en charge les fichiers locaux et basés sur le stockage +- Gère le contenu codé de base64 +- Nettoie automatiquement les fichiers temporaires +- Préserve la structure du document +- Prend en charge l'ajout de métadonnées personnalisées +- Gestion des erreurs pour les fichiers non valides +- Traitement économe en mémoire \ No newline at end of file diff --git a/fr/integrations/langchain/document-loaders/figma.md b/fr/integrations/langchain/document-loaders/figma.md new file mode 100644 index 00000000..d60f0a1b --- /dev/null +++ b/fr/integrations/langchain/document-loaders/figma.md @@ -0,0 +1,67 @@ +--- +description: Load data from a Figma file. +--- + +# Chargeur de documents Figma + +

nœud

+ +Figma est une application Web collaborative pour la conception d'interface. Ce module fournit des fonctionnalités pour charger et traiter le contenu à partir des fichiers Figma, y ​​compris du texte, des composants et des métadonnées. + +Ce module fournit un chargeur de document Figma sophistiqué qui peut: +- Chargez le contenu à partir de fichiers Figma spécifiques +- Extraire du texte à partir de nœuds sélectionnés +- Traiter le contenu récursivement +- Gérer l'authentification avec l'API Figma +- Traiter le contenu avec des séparateurs de texte +- Personnaliser l'extraction des métadonnées + +## Entrées + +### Paramètres requis +- ** Clé de fichier **: l'identifiant unique du fichier Figma (à partir de l'URL du fichier) +- ** ID de nœud **: Liste des identifiants de nœud séparés par des virgules à extraire +- ** Connectez les informations d'identification **: les informations d'identification de l'API Figma (jeton d'accès) + +### Paramètres facultatifs +- ** récursif **: s'il faut traiter les nœuds récursivement +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Extraction de contenu basée sur l'API +- Sélection de contenu au niveau du nœud +- Traitement récursif +- Support de division de texte +- Personnalisation des métadonnées +- Gestion des erreurs +- Gestion de l'authentification + +## Format de clé de fichier +La touche de fichier se trouve dans l'URL du fichier Figma: +``` +https://www.figma.com/file/:key/:title +``` +Exemple: dans`https://www.figma.com/file/12345/Website`, la clé de fichier est`12345` + +## ID de nœud +Pour obtenir des ID de 
nœud: +1. Installez le plugin d'inspecteur de nœud sur Figma +2. Sélectionnez les éléments souhaités +3. Copiez les ID de nœud de l'inspecteur +4. Utilisez le format séparé des virgules: "0, 1, 2" + +## Notes +- Nécessite un jeton d'accès Figma valide +- Les ID de nœud doivent être valides pour le fichier +- Prend en charge l'extraction de contenu récursif +- Peut traiter plusieurs nœuds à la fois +- Gère les limites et les erreurs du taux de l'API +- Préserve la hiérarchie des nœuds dans les métadonnées +- Prend en charge l'ajout de métadonnées personnalisées diff --git a/fr/integrations/langchain/document-loaders/file-loader.md b/fr/integrations/langchain/document-loaders/file-loader.md new file mode 100644 index 00000000..91a204bf --- /dev/null +++ b/fr/integrations/langchain/document-loaders/file-loader.md @@ -0,0 +1,86 @@ +# Déposer + +
+ +Le chargeur de fichiers est un chargeur de document polyvalent qui prend en charge plusieurs formats de fichiers, notamment TXT, JSON, CSV, DOCX, PDF, Excel, PowerPoint, etc. Ce module fournit une interface unifiée pour le chargement et le traitement de divers types de fichiers. + +Ce module fournit un chargeur de fichiers sophistiqué qui peut: + +* Chargez plusieurs formats de fichiers +* Prise en charge des fichiers et fichiers codés en base de base64 à partir du stockage +* Gérer les options de traitement spécifiques au PDF +* Traiter JSON et JSONL avec l'extraction du pointeur +* Prise en charge du fractionnement du texte +* Personnaliser l'extraction des métadonnées +* Gérer l'intégration du stockage des fichiers + +## Entrées + +### Paramètres requis + +* ** Fichier **: le (s) fichier (s) à traiter (prend en charge plusieurs formats) + +### Paramètres facultatifs + +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Utilisation du PDF **: Choisissez entre: + * Un document par page + * Un document par fichier +* ** Utilisez le héritage Build **: Utilisez la construction héritée pour les problèmes de compatibilité PDF +* ** Extraction du pointeur JSONL **: Nom du pointeur pour les fichiers JSONL +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Types de fichiers pris en charge + +* Fichiers texte (.txt) +* Fichiers JSON (.json) +* Fichiers JSONL (.jsonl) +* Fichiers CSV (.csv) +* Fichiers PDF (.pdf) +* Documents de mots (.docx) +* Files Excel (.xlsx, .xls) +* Fichiers PowerPoint (.pptx, .ppt) +* Et plus ... 
+ +## Caractéristiques + +* Support multi-format +* Intégration de stockage +* Options de traitement PDF +* Extraction du pointeur JSON +* Support de division de texte +* Personnalisation des métadonnées +* Gestion des erreurs +* Détection de type mime + +## Options de traitement de fichiers + +### Traitement PDF + +* Fractionnement par page +* Mode de document unique +* Support de construction hérité +* Compatibilité OCR + +### Traitement JSON / JSONL + +* Extraction basée sur le pointeur +* Gestion des données structurées +* Traitement du tableau +* Support d'objet imbriqué + +## Notes + +* Détecte automatiquement le type de fichier +* Gère plusieurs fichiers simultanément +* Prend en charge l'intégration du stockage de fichiers +* Préserve les métadonnées du fichier +* Gère efficacement les fichiers volumineux +* Gestion des erreurs pour les fichiers non valides +* Traitement économe en mémoire diff --git a/fr/integrations/langchain/document-loaders/firecrawl.md b/fr/integrations/langchain/document-loaders/firecrawl.md new file mode 100644 index 00000000..edbc8594 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/firecrawl.md @@ -0,0 +1,94 @@ +--- +description: Load data from URL using FireCrawl. +--- + +# Pompier + +

Node Firecraw + +# Chargeur de documents Firecraw + +Firecrawl est un puissant service Web rampant et grattant qui offre des capacités avancées pour extraire le contenu des sites Web. Ce module permet le chargement et le traitement du contenu Web via l'API Firecrawl. + +Ce module fournit un robot Web sophistiqué qui peut: +- Gratter les pages Web uniques +- Crawl entiers des sites Web +- Extraire des données structurées +- Gérer le contenu rendu javascript +- Traiter le contenu avec des séparateurs de texte +- Personnaliser l'extraction des métadonnées +- Prise en charge de plusieurs modes de fonctionnement + +## Entrées + +### Paramètres requis +- ** URL **: la page Web ou l'URL du site Web à traiter +- ** Connectez les informations d'identification **: FireCrawl API Création d'identification +- ** Mode **: Choisissez entre: + - Groupement: extraction à une seule page + - Crawl: site Web de plusieurs pages rampant + - Extrait: Extraction de données structurées + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Options de rupture **: + - Inclure des balises: balises HTML à inclure + - Exclure les balises: balises HTML pour exclure + - Mobile: utilisez un agent utilisateur mobile + - Skip TLS Vérification: contourner les contrôles SSL + - Timeout: Demandez le délai d'expiration +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Plusieurs modes de fonctionnement +- Options de grattage avancées +- Extraction de données structurées +- Rendu javascript +- Émulation de l'appareil mobile +- Paramètres de délai d'attente personnalisés +- Gestion des erreurs + +## Modes de 
fonctionnement + +### Mode de grat +- Traitement à une seule page +- Extraction de contenu principale +- Sélection de format +- Filtrage de balises personnalisé + +### Mode de crawl +- Rampage de plusieurs pages +- Manipulation du sous-domaine +- Traitement du site +- Extraction de liaison + +### Mode d'extrait +- Extraction de données structurées +- Analyse de schéma +- Extraction à base de LLM +- Invites d'extraction personnalisées + +## Structure de document +Chaque document contient: +- ** PageContent **: Contenu extrait au format Markdown +- ** Metadata **: + - Titre: Titre de la page + - Description: Meta Description + - Langue: contenu langue + - SourceUrl: URL d'origine + - Métadonnées personnalisées supplémentaires + +## Notes +- Nécessite une clé API Firecrawl valide +- Prend en charge plusieurs formats de contenu +- Les manches limites de taux +- Surveillance de l'état du travail +- Gestion des erreurs et tentatives +- Options de demande personnalisables +- Traitement économe en mémoire diff --git a/fr/integrations/langchain/document-loaders/folder.md b/fr/integrations/langchain/document-loaders/folder.md new file mode 100644 index 00000000..873705ef --- /dev/null +++ b/fr/integrations/langchain/document-loaders/folder.md @@ -0,0 +1,92 @@ +# Dossier avec chargeur de fichiers + +

Nœud de dossier avec fichiers

+ +Le chargeur de dossier fournit des fonctionnalités pour charger et traiter plusieurs fichiers à partir d'un répertoire. Ce module prend en charge une large gamme de formats de fichiers et peut traiter récursivement des sous-répertoires. + +Ce module fournit un chargeur de dossier sophistiqué qui peut: +- Charger plusieurs types de fichiers simultanément +- Traiter les répertoires récursivement +- Gérer divers formats de documents +- Prise en charge du traitement spécifique au PDF +- Traiter les fichiers de données structurées +- Personnaliser l'extraction des métadonnées +- Prise en charge du fractionnement du texte + +## Entrées + +### Paramètres requis +- ** Chemin de dossier **: Chemin vers le répertoire contenant des fichiers +- ** récursif **: s'il faut traiter les sous-répertoires + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Utilisation du PDF **: Choisissez entre: + - Un document par page + - Un document par fichier +- ** Extraction du pointeur JSONL **: Nom du pointeur pour les fichiers JSONL +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Types de fichiers pris en charge + +### Documents +- PDF (.pdf) +- Word (.doc, .docx) +- Excel (.xls, .xlsx, .xlsm, .xlsb) +- PowerPoint (.ppt, .pptx) +- Texte (.txt) +- Markdown (.md, .markdown) +- Html (.html) +- XML (.xml) + +### Fichiers de données +- JSON (.json) +- JSONL (.jsonl) +- CSV (.csv) + +### Langues de programmation +- Python (.py, .python) +- Javascript (.js) +- TypeScript (.ts) +- Java (.java) +- C / c ++ (.c, .cpp, .h) +- C # (.cs) +- Ruby (.rb, .Ruby) +- Aller (.go) +- Php (.php) +- Swift (.swift) +- Rust (.RS) +- 
Scala (.scala, .sc) +- Kotlin (.kt) +- Solidité (.sol) + +### Technologies Web +- CSS (.css) +- SCSS (.SCSS) +- Moins (. +- SQL (.SQL) +- Tampons de protocole (.proto) + +## Caractéristiques +- Support multi-format +- Traitement répertoire récursif +- Options de traitement PDF +- Gestion des données structurées +- Support de division de texte +- Personnalisation des métadonnées +- Gestion des erreurs + +## Notes +- Détecte automatiquement les types de fichiers +- Gère les grands répertoires +- Préserve les métadonnées du fichier +- Traitement économe en mémoire +- Prend en charge les extensions de fichiers personnalisées +- Gestion des erreurs pour les fichiers non valides +- Formats de sortie flexibles \ No newline at end of file diff --git a/fr/integrations/langchain/document-loaders/gitbook.md b/fr/integrations/langchain/document-loaders/gitbook.md new file mode 100644 index 00000000..813ac3c9 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/gitbook.md @@ -0,0 +1,77 @@ +--- +description: Load data from GitBook. +--- + +# Gitbook + +

gitbook nœud

+ +# Chargeur de documents gitbook + +GitBook est une plate-forme de documentation moderne qui aide les équipes à partager les connaissances. Ce module fournit des fonctionnalités pour charger et traiter le contenu à partir des sites de documentation GitBook. + +Ce module fournit un chargeur de documents Gitbook sophistiqué qui peut: +- Chargez le contenu à partir de pages de gitbook spécifiques +- Crawl des sites de documentation entièrement gitbook +- Extrait de contenu structuré +- Traiter le contenu avec des séparateurs de texte +- Personnaliser l'extraction des métadonnées +- Gérer le chargement récursif de la page + +## Entrées + +### Paramètres requis +- ** Chemin Web **: L'URL vers la page Gitbook ou le chemin racine + - Page unique: par exemple, https://docs.gitbook.com/product-tour/navigation + - Chemin racine: par exemple, https://docs.gitbook.com/ + +### Paramètres facultatifs +- ** devrait charger tous les chemins **: s'il faut charger de manière récursivement toutes les pages du chemin racine +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Chargement d'une page +- Site récursif rampant +- Extraction de contenu +- Support de division de texte +- Personnalisation des métadonnées +- Gestion des erreurs +- Gestion des chemins + +## Modes de chargement + +### Mode de page unique +- Charge le contenu d'une page spécifique +- Extrait le contenu de la page et les métadonnées +- Conserve la structure de la page +- Plus rapide pour un accès à une seule page + +### Tous les chemins +- Charge récursivement toutes les pages de la racine +- Maintient la 
hiérarchie du site +- Extrait tous les contenus disponibles +- Conserve la structure de navigation + +## Structure de document +Chaque document contient: +- ** PageContent **: Extrait du contenu de la page +- ** Metadata **: + - Titre: Titre de la page + - URL: URL de la page d'origine + - Métadonnées personnalisées supplémentaires + +## Notes +- Prend en charge le chargement unique de la page et du site complet +- Gère le contenu dynamique de Gitbook +- Préserve la structure du document +- Prend en charge l'ajout de métadonnées personnalisées +- Gestion des erreurs pour les URL non valides +- Traitement économe en mémoire +- Formats de sortie flexibles diff --git a/fr/integrations/langchain/document-loaders/github.md b/fr/integrations/langchain/document-loaders/github.md new file mode 100644 index 00000000..caf678fc --- /dev/null +++ b/fr/integrations/langchain/document-loaders/github.md @@ -0,0 +1,82 @@ +--- +description: Load data from a GitHub repository. +--- + +# Chargeur de documents github + +

Nœud GitHub

+ +GitHub est une plate-forme pour le contrôle et la collaboration de versions. Ce module fournit des fonctionnalités pour charger et traiter le contenu à partir des référentiels GitHub, soutenant les référentiels publics et privés. + +Ce module fournit un chargeur de document GitHub sophistiqué qui peut: +- Chargez le contenu des référentiels GitHub +- Prise en charge de l'accès au référentiel privé +- Traiter les référentiels récursivement +- Gérer les instances de github personnalisées +- Contrôler la concurrence et les tentatives +- Personnaliser le filtrage des fichiers +- Traiter le contenu avec des séparateurs de texte + +## Entrées + +### Paramètres requis +- ** REPO LIEN **: L'URL du référentiel GitHub (par exemple, https://github.com/flowiseai/flowise) +- ** Branch **: La branche pour charger le contenu de (par défaut: Main) + +### Paramètres facultatifs +- ** Connectez les informations d'identification **: les informations d'identification de l'API GitHub (requises pour les références privées) +- ** récursif **: s'il faut traiter les sous-répertoires +- ** MAX CONCURENCE **: Nombre maximum de charges de fichiers simultanées +- ** URL de base GitHub **: URL de base GitHub personnalisée pour les instances d'entreprise +- ** API d'instance GitHub **: URL de l'API GitHub personnalisée pour les instances d'entreprise +- ** Ignorer les chemins **: tableau des modèles globaux pour les chemins à ignorer +- ** MAX RETRES **: Nombre maximum de tentatives de réessayer +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Support public / privé Repo +- Prise en 
charge de l'instance d'entreprise +- Traitement répertoire récursif +- Contrôle de la concurrence +- Réessayer le mécanisme +- Filtrage de chemin +- Support de division de texte +- Personnalisation des métadonnées + +## Méthodes d'authentification + +### Référentiels publics +- Aucune authentification requise +- Les limites de taux s'appliquent +- Limité au contenu public + +### Référentiels privés +- Nécessite un jeton d'accès GitHub +- Limites de taux plus élevées +- Accès au contenu privé +- Assistance d'entreprise + +## Structure de document +Chaque document contient: +- ** PageContent **: contenu de fichier +- ** Metadata **: + - Source: chemin de fichier dans le référentiel + - branche: branche du référentiel + - commit: engager le hachage + - Métadonnées personnalisées supplémentaires + +## Notes +- Soutient les reposs publics et privés +- Instances GitHub d'entreprise prises en charge +- La limitation du taux géré automatiquement +- Backoff exponentiel pour les tentatives +- Filtrage de chemin avec les modèles globaux +- Traitement économe en mémoire +- Gestion des erreurs pour les références non valides diff --git a/fr/integrations/langchain/document-loaders/google-drive.md b/fr/integrations/langchain/document-loaders/google-drive.md new file mode 100644 index 00000000..c2d8f8b5 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/google-drive.md @@ -0,0 +1,111 @@ +# Google Drive + +
+ +Google Drive est un service de stockage et de synchronisation de fichiers cloud. Ce module fournit des fonctionnalités pour charger et traiter les fichiers de Google Drive, prenant en charge divers formats de fichiers et Google Workspace Documents. + +Ce module fournit un chargeur de documents Google Drive sophistiqué qui peut: + +* Chargez plusieurs types de fichiers +* Traiter les documents Google Workspace +* Gérer le chargement basé sur les dossiers +* Soutenir les lecteurs partagés +* Traiter les fichiers récursivement +* Personnaliser le filtrage des types de fichiers +* Gérer l'authentification OAuth2 + +### Paramètres requis + +* ** Connectez les informations d'identification **: Google Drive OAuth2 Informations d'identification. Se référer à[#Google Drive](../tools/google-drive.md) +* ** Sélectionnez des fichiers ** ou ** ID de dossier **: Choisissez des fichiers spécifiques ou fournissez un ID de dossier + +### Paramètres facultatifs + +* ** Types de fichiers **: Types de fichiers à charger: + * Google Docs + * Feuilles Google + * Glissements Google + * Fichiers pdf + * Fichiers texte + * Documents de mots + * PowerPoint + * Fichiers Excel +* ** Inclure les sous-dossiers **: les fichiers de processus dans les sous-dossiers +* ** Inclure les lecteurs partagés **: Accès aux fichiers à partir de lecteurs partagés +* ** Fichiers max **: nombre maximum de fichiers à charger (par défaut: 50) +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Types de fichiers pris en charge + +### Google Workspace + +* Google Docs (application / vnd.google-apps.document) +* Google 
Sheets (application / vnd.google-apps.spreadsheet) +* Google Slides (application / vnd.google-apps.presentation) + +### Microsoft Office + +* Word (.docx) +* Excel (.xlsx) +* PowerPoint (.pptx) + +### Autres formats + +* PDF (.pdf) +* Fichiers texte (.txt) + +## Caractéristiques + +* Authentification OAuth2 +* Prise en charge du type de fichier multiple +* Traitement des dossiers +* Accès au lecteur partagé +* Filtrage de type de fichier +* Support de division de texte +* Personnalisation des métadonnées +* Gestion des erreurs + +## Méthodes de chargement + +### Mode de sélection de fichiers + +* Sélection directe des fichiers +* Prise en charge des fichiers multiples +* Filtrage de type de fichier +* Conservation des métadonnées + +### Mode de dossier + +* Traitement des dossiers récursifs +* Support de sous-dossier +* Filtrage de type de fichier +* Traitement par lots + +## Structure de document + +Chaque document contient: + +* ** PageContent **: Extrait du contenu du fichier +* ** Metadata **: + * Nom de fichier: nom de fichier d'origine + * FileType: Type MIME + * FileID: ID de fichier Google Drive + * Source: chemin de fichier / URL + * Métadonnées personnalisées supplémentaires + +## Notes + +* Nécessite une authentification OAuth2 +* Les manches limites de taux +* Prend en charge les grands fichiers +* Gestion temporaire des fichiers +* Traitement économe en mémoire +* Gestion des erreurs pour les fichiers non valides +* Rafraîchissement automatique des jetons diff --git a/fr/integrations/langchain/document-loaders/google-sheets.md b/fr/integrations/langchain/document-loaders/google-sheets.md new file mode 100644 index 00000000..07679454 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/google-sheets.md @@ -0,0 +1,99 @@ +# Feuilles Google + +
+ +Google Sheets est une application de feuille de calcul basée sur le Web. Ce module fournit des fonctionnalités pour charger et traiter les données à partir des documents Google Sheets, en prenant en charge diverses options de formatage de données et la sélection des feuilles. + +Ce module fournit un chargeur de documents Google Sheets sophistiqué qui peut: + +* Chargez les données de plusieurs feuilles de calcul +* Sélectionnez des feuilles et des gammes spécifiques +* Gérer les valeurs formatées et non formatées +* Processus des formules et calculs +* Personnaliser la manipulation de l'en-tête +* Traiter le contenu avec des séparateurs de texte +* Gérer l'authentification OAuth2 + +## Entrées + +### Paramètres requis + +* ** Connectez les informations d'identification **: Google Sheets OAuth2 Informations d'identification. Se référer à[#Google Sheets](../tools/google-sheets.md) +* ** Sélectionnez le feuille de calcul **: Choisissez des feuilles de calcul (s) dans votre Google Drive + +### Paramètres facultatifs + +* ** Noms de feuilles **: Liste des noms de feuilles séparées par des virgules à charger +* ** Plage **: Plage spécifique à charger (par exemple, A1: E10) +* ** Inclure les en-têtes **: s'il faut inclure la première ligne comme en-têtes (par défaut: true) +* ** Option de rendu de valeur **: comment les valeurs doivent être représentées: + * Valeur formatée: comme indiqué dans l'interface utilisateur + * Valeur non formatée: valeurs brutes + * Formule: formules originales +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques + +* Authentification 
OAuth2 +* Prise en charge de la feuille de calcul multiple +* Sélection de feuilles +* Spécification de plage +* Manipulation en tête +* Options de formatage de valeur +* Support de division de texte +* Personnalisation des métadonnées + +## Options de rendu de valeur + +### Valeur formatée + +* Valeurs affichées dans l'interface utilisateur +* Comprend le formatage +* Nombres avec des décimales / devises +* Dates au format spécifié + +### Valeur non formatée + +* Valeurs de cellules brutes +* Nombres sans formatage +* Dates comme numéros de série +* Booléen comme vrai / faux + +### Formule + +* Formules originales +* Références cellulaires +* Fonctions +* Calculs + +## Structure de document + +Chaque document contient: + +* ** PageContent **: Contenu de la feuille formatée +* ** Metadata **: + * SpreadsheetID: ID Google Sheets + * nom de calcul: nom de document + * Nom de feuille: nom de la feuille + * Plage: plage sélectionnée + * En-têtes: en-têtes de colonne (si inclus) + * LastModified: Dernière date de modification + * URL: lien de vue Web + * Métadonnées personnalisées supplémentaires + +## Notes + +* Nécessite une authentification OAuth2 +* Les manches limites de taux +* Prend en charge de grandes feuilles de calcul +* Traitement économe en mémoire +* Gestion des erreurs pour les gammes non valides +* Rafraîchissement automatique des jetons +* Accès aux données en temps réel diff --git a/fr/integrations/langchain/document-loaders/jira.md b/fr/integrations/langchain/document-loaders/jira.md new file mode 100644 index 00000000..ce96aee9 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/jira.md @@ -0,0 +1,76 @@ +# Chargeur de documents Jira + +
+ +Jira est un outil de suivi et de gestion de projet populaire. Ce module fournit des fonctionnalités pour charger et traiter les problèmes des projets JIRA, prenant en charge diverses options de filtrage et personnalisation des métadonnées. + +Ce module fournit un chargeur de document Jira sophistiqué qui peut: +- Chargez les problèmes des projets Jira +- Filtre les problèmes par date de création +- Contrôler la taille du lot pour les demandes +- Traiter le contenu avec des séparateurs de texte +- Personnaliser l'extraction des métadonnées +- Gérer l'authentification de l'API + +## Entrées + +### Paramètres requis +- ** Connectez les informations d'identification **: JIRA API Identifiés (nom d'utilisateur et jeton d'accès) +- ** Hôte **: URL de l'instance Jira (par exemple, https://jira.example.com) +- ** Clé du projet **: La clé du projet JIRA pour charger des problèmes de + +### Paramètres facultatifs +- ** Limite par demande **: Nombre de problèmes à récupérer par demande API (par défaut: 100) +- ** Créé après **: Filtrez les problèmes créés après une date spécifique (par exemple, 2024-01-01) +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Authentification des jetons API +- Chargement du problème basé sur les projets +- Filtrage de date de création +- Contrôle de la taille du lot +- Support de division de texte +- Personnalisation des métadonnées +- Formats de sortie flexibles + +## Authentification +Le chargeur nécessite: +- Nom d'utilisateur Jira +- Token d'accès API +- URL de l'hôte de votre instance Jira + +## Structure de document +Chaque 
document contient: +- ** PageContent **: émettez le contenu et la description +- ** Metadata **: + - Métadonnées spécifiques au problème (personnalisables) + - Informations sur le projet + - Dates de création + - Statut d'émission + - Métadonnées personnalisées supplémentaires + +## Manipulation des métadonnées +Deux façons de personnaliser les métadonnées: +1. ** Metadata supplémentaires **: Ajouter de nouveaux champs de métadonnées + - Spécifiez comme objet JSON + - Fusionné avec les métadonnées existantes + +2. ** omettre les clés de métadonnées **: supprimer les métadonnées indésirables + - Liste des clés séparées par des virgules + - Utiliser * pour supprimer toutes les métadonnées par défaut + - Clés imbriquées prises en charge (par exemple, Key1, Key2, Key3.NestedKey1) + +## Notes +- Gère la limitation du taux de l'API +- Traitement efficace des lots +- Chargement économe en mémoire +- Gestion des erreurs pour les demandes d'API +- Prend en charge Jira Cloud et auto-hébergé +- Accès aux données du problème en temps réel diff --git a/fr/integrations/langchain/document-loaders/json-file.md b/fr/integrations/langchain/document-loaders/json-file.md new file mode 100644 index 00000000..d9aed2ba --- /dev/null +++ b/fr/integrations/langchain/document-loaders/json-file.md @@ -0,0 +1,66 @@ +--- +description: Load data from JSON files. +--- + +# Fichier json + +

Nœud de fichier JSON

+ +JSON (Notation d'objet JavaScript) est un format d'interchange de données léger qui est facile à lire et à écrire pour les humains et facile à analyser et à générer. Ce module fournit des fonctionnalités avancées pour charger et traiter les fichiers JSON dans votre flux de travail. + +Ce module fournit un chargeur de documents JSON sophistiqué qui peut: + +* Chargez des fichiers JSON simples ou multiples +* Prise en charge des fichiers et fichiers codés en base de base64 à partir du stockage +* Extraire des données spécifiques à l'aide de pointeurs JSON +* Manipuler l'extraction des métadonnées dynamiques +* Traiter les structures JSON imbriquées + +## Entrées + +* ** Fichier JSON **: le (s) fichier (s) JSON (. +* ** Splitter de texte ** (facultatif): un séparateur de texte pour traiter le contenu extrait +* ** Extraction des pointeurs ** (Facultatif): Liste de pointeurs JSON séparée par des virgules pour extraire des données spécifiques +* ** Métadonnées supplémentaires ** (Facultatif): objet JSON pour l'extraction des métadonnées dynamiques du document +* ** omettre les clés de métadonnées ** (facultatif): liste de clés de métadonnées séparées par des virgules pour omettre à partir des métadonnées par défaut + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques + +* Support de traitement de fichiers multiples +* Extraction de données basée sur le pointeur JSON +* Cartographie des métadonnées dynamiques +* Manipulation de la structure JSON imbriquée +* Prise en charge de l'intégration du stockage +* Capacités de manutention de base64 et de blob + +## Exemple d'utilisation + +Pour un document JSON comme: + +```json +[ + { + "url": "https://www.google.com", + "body": "This is body 1" + }, + { + "url": "https://www.yahoo.com", + "body": "This is body 2" + } +] +``` + +Vous pouvez extraire des champs spécifiques sous forme de 
métadonnées en utilisant: + +```json +{ + "source": "/url" +} +``` + +Cela ajoutera la valeur d'URL sous forme de métadonnées avec la clé "source" pour chaque document. diff --git a/fr/integrations/langchain/document-loaders/jsonlines.md b/fr/integrations/langchain/document-loaders/jsonlines.md new file mode 100644 index 00000000..26e7a194 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/jsonlines.md @@ -0,0 +1,102 @@ +# Fichier de lignes JSON + + Nœud

+ +JSON Lines (JSONL) est un format de texte où chaque ligne est une valeur JSON valide. Ce module fournit des fonctionnalités pour charger et traiter les fichiers JSONL, avec la prise en charge de l'extraction de contenu basée sur le pointeur et de la manipulation dynamique des métadonnées. + +Ce module fournit un chargeur de document JSONL sophistiqué qui peut: + +* Chargez des fichiers JSONL simples ou multiples +* Extraire des valeurs spécifiques à l'aide de pointeurs JSON +* Manipuler l'extraction des métadonnées dynamiques +* Traiter le contenu avec des séparateurs de texte +* Prise en charge des fichiers codés Base64 +* Gérer l'intégration du stockage des fichiers +* Personnaliser l'extraction des métadonnées + +## Entrées + +### Paramètres requis + +* ** Fichier JSONL **: le (s) fichier JSONL (. +* ** Extraction du pointeur **: Pointeur JSON pour extraire le contenu (par exemple, "clé" pour`{"key": "value"}`) + +### Paramètres facultatifs + +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques + +* Extraction du pointeur JSON +* Gestion des métadonnées dynamiques +* Support de division de texte +* Prise en charge du fichier Base64 +* Intégration de stockage de fichiers +* Gestion des erreurs +* Traitement économe en mémoire + +## Extraction du pointeur JSON + +### Exemple de base + +Pour le contenu JSONL: + +```jsonl +{"key": "value1", "source": "file1.txt"} +{"key": "value2", "source": "file2.txt"} +``` + +Avec le pointeur "clé", extraits: "Value1", "Value2" + +### Métadonnées dynamiques + +Vous pouvez extraire les valeurs sous forme de métadonnées à 
l'aide de pointeurs JSON: + +```json +{ + "source": "/source", + "custom": "/metadata/field" +} +``` + +## Structure de document + +Chaque document contient: + +* ** PageContent **: Extrait de contenu à l'aide du pointeur +* ** Metadata **: + * Source: chemin d'origine du fichier + * ligne: numéro de ligne dans le fichier + * pointeur: pointeur JSON utilisé + * Métadonnées dynamiques supplémentaires + +## Manutention de fichiers + +### Fichiers locaux + +* Chargement de fichier direct +* Base64 Contenu codé +* Prise en charge des fichiers multiples + +### Intégration de stockage + +* Prise en charge du système de stockage de fichiers +* Stockage basé sur l'organisation +* Stockage basé sur Chatflow + +## Notes + +* Un document par ligne JSONL +* Les lignes JSON non valides sont ignorées +* Traitement économe en mémoire +* Gestion des erreurs pour les pointeurs non valides +* Support aux structures JSON imbriquées +* Extraction des métadonnées dynamiques +* Formats de sortie flexibles diff --git a/fr/integrations/langchain/document-loaders/microsoft-excel.md b/fr/integrations/langchain/document-loaders/microsoft-excel.md new file mode 100644 index 00000000..e0e2f1f1 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/microsoft-excel.md @@ -0,0 +1,86 @@ +# Chargeur de documents Microsoft Excel + +
+ +Microsoft Excel est un programme de feuille de calcul qui comprend des outils de calcul, des tables de pivot et un langage de programmation macro. Ce module fournit des fonctionnalités pour charger et traiter les fichiers Excel à l'aide de sheetjs. + +Ce module fournit un chargeur de documents Excel sophistiqué qui peut: +- Chargez plusieurs formats de fichiers Excel +- Traiter plusieurs feuilles de calcul +- Convertir les lignes en documents structurés +- Gérer divers types de données +- Conserver la mise en forme des cellules +- Extraire les métadonnées par rangée +- Inférence du type de support + +## Entrées + +### Paramètres requis +- ** Fichier Excel **: le (s) fichier (s) pour traiter (.xls, .xlsx, .xlsm, .xlsb) + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Support de format multiple +- Traitement à plusieurs feuilles +- Préservation du type de données +- Extraction de métadonnées +- Type d'inférence +- Gestion des erreurs +- Traitement économe en mémoire + +## Formats pris en charge +- Excel binaire (.xls) +- Excel Workbook (.xlsx) +- Excel Macro-compatible (.xlsm) +- Excel Binary Workbook (.xlsb) + +## Traitement des types de données + +### Types pris en charge +- Texte (chaîne) +- Nombres (numéro) +- Dates (date) +- Booléens (booléen) +- Formules (valeurs calculées) +- Cellules vides (null) + +## Structure de document +Chaque document contient: +- ** PageContent **: Contenu de ligne formaté comme paires de valeurs clés +- ** Metadata **: + - feuille de travail: nom de feuille + - Rownum: indice de ligne + - 
Valeurs de colonne d'origine + - Métadonnées personnalisées supplémentaires + +## Traitement des lignes +Chaque ligne est convertie en document avec: +- Paires de valeurs clés pour chaque cellule +- En-têtes de colonne conservées +- Type d'information +- Position + +## Attributs de métadonnées +Les attributs par défaut incluent: +- feuille de travail: feuille ou nom de feuille de travail (chaîne) +- Rownum: Index de ligne (numéro) +- Attributs dynamiques basés sur les en-têtes de colonne + +## Notes +- Utilise des feuilles pour l'analyse +- Conserve les types de données +- Gère plusieurs feuilles +- Enfil Types de colonnes +- Traitement économe en mémoire +- Gestion des erreurs pour les fichiers non valides +- Formats de sortie flexibles +- Inférence de type colonne diff --git a/fr/integrations/langchain/document-loaders/microsoft-powerpoint.md b/fr/integrations/langchain/document-loaders/microsoft-powerpoint.md new file mode 100644 index 00000000..e49525cc --- /dev/null +++ b/fr/integrations/langchain/document-loaders/microsoft-powerpoint.md @@ -0,0 +1,84 @@ +# Chargeur de documents Microsoft PowerPoint + +
+ +Microsoft PowerPoint est un programme de présentation pour la création et l'affichage des diapositives. Ce module fournit des fonctionnalités pour charger et traiter les fichiers PowerPoint à l'aide de OfficePaSer. + +Ce module fournit un chargeur de document PowerPoint sophistiqué qui peut: +- Chargez des présentations PowerPoint +- Extraire du texte des diapositives +- Diviser le contenu en diapositives individuelles +- Numéro de la diapositive +- Traiter les métadonnées par diapositive +- Prise en charge de plusieurs formats de diapositive +- Gérer divers séparateurs de diapositives + +## Entrées + +### Paramètres requis +- ** Fichier PowerPoint **: le (s) fichier (s) PowerPoint (.ppt, .pptx) + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Extraction de texte +- Séparation de diapositives +- Manipulation des métadonnées +- Gestion des erreurs +- Traitement économe en mémoire +- Détection de diapositives heuristiques +- Filtrage de contenu + +## Méthodes de détection de diapositives + +### Reconnaissance des modèles +Le chargeur tente d'identifier les diapositives en utilisant des modèles communs: +- Marqueurs "Slide X" +- Marqueurs "page X" +- Numéros de page "x / y" +- Souligneurs de soulignement +- Séparateurs de tableau de bord +- Plusieurs lignes de newlines + +### Mécanismes de secours +Si la reconnaissance des modèles échoue: +1. Split by Double Newlines +2. 
Traitez le contenu comme une seule diapositive + +## Structure de document +Chaque document contient: +- ** PageContent **: Contenu du texte extrait de la diapositive +- ** Metadata **: + - SlideNumber: numéro de diapositif séquentiel + - DocumentType: "PowerPoint" + - Métadonnées personnalisées supplémentaires + +## Traitement du contenu +- Les lames vides sont filtrées +- Espace blanc de tête / traînage supprimé +- Validation minimale de la longueur du contenu +- Validation du nombre de diapositives raisonnable + +## Attributs de métadonnées +Les attributs par défaut incluent: +- SlideNumber: Numéro de diapositive (numéro) +- DocumentType: Type de document (chaîne) +- Métadonnées personnalisées à partir de l'entrée + +## Notes +- Utilise OfficeArser pour l'extraction +- Gère divers formats de diapositive +- Détection de diapositive intelligente +- Validation du contenu +- Traitement économe en mémoire +- Gestion des erreurs pour les fichiers non valides +- Formats de sortie flexibles +- Mécanismes de secours robustes diff --git a/fr/integrations/langchain/document-loaders/microsoft-word.md b/fr/integrations/langchain/document-loaders/microsoft-word.md new file mode 100644 index 00000000..5b37fdf9 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/microsoft-word.md @@ -0,0 +1,86 @@ +# Chargeur de documents Microsoft Word + +
+ +Microsoft Word est un logiciel de traitement de texte pour créer et modifier des documents texte. Ce module fournit des fonctionnalités pour charger et traiter des documents de mots à l'aide de OfficePaSer. + +Ce module fournit un chargeur de documents de mots sophistiqué qui peut: +- Documents de chargement +- Extraire le contenu du texte +- Diviser le contenu en sections +- Numéro de la page +- Traiter les métadonnées par section +- Prise en charge de plusieurs formats de section +- Gérer divers séparateurs de section + +## Entrées + +### Paramètres requis +- ** Fichier Word **: le (s) fichier (s) pour traiter (.doc, .docx) + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Extraction de texte +- Séparation de section +- Manipulation des métadonnées +- Gestion des erreurs +- Traitement économe en mémoire +- Détection de section heuristique +- Filtrage de contenu + +## Méthodes de détection de section + +### Reconnaissance des modèles +Le chargeur tente d'identifier les sections en utilisant des modèles communs: +- Marqueurs "page X" +- Marqueurs "Section X" +- Marqueurs "Chapitre X" +- Sections numérotées (par exemple, "1.", "2.") +- Tous les têtes de capuchons +- Séparateurs de soulignement longs +- Séparateurs de tiret long + +### Mécanismes de secours +Si la reconnaissance des modèles échoue: +1. Split par plusieurs nouvelles lignes +2. Split by Double Newlines +3. 
Traitez le contenu comme une seule section + +## Structure de document +Chaque document contient: +- ** PageContent **: Contenu texte extrait de la section +- ** Metadata **: + - DocumentType: "Word" + - Pagenumber: numéro de section séquentiel + - Métadonnées personnalisées supplémentaires + +## Traitement du contenu +- Les sections vides sont filtrées +- Espace blanc de tête / traînage supprimé +- Validation minimale de la longueur du contenu +- Validation du nombre de sections raisonnables + +## Attributs de métadonnées +Les attributs par défaut incluent: +- DocumentType: Type de document (chaîne) +- PageCount: Nombre de pages / sections (numéro) +- Métadonnées personnalisées à partir de l'entrée + +## Notes +- Utilise OfficeArser pour l'extraction +- Gère divers formats de documents +- Détection de section intelligente +- Validation du contenu +- Traitement économe en mémoire +- Gestion des erreurs pour les fichiers non valides +- Formats de sortie flexibles +- Mécanismes de secours robustes diff --git a/fr/integrations/langchain/document-loaders/notion.md b/fr/integrations/langchain/document-loaders/notion.md new file mode 100644 index 00000000..e31c56d8 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/notion.md @@ -0,0 +1,116 @@ +# Notion + +La notion est une plate-forme de collaboration qui combine la prise de notes, la gestion des connaissances et la gestion de projet. Ce module fournit trois chargeurs différents pour traiter le contenu de la notion: la base de données, la page et les chargeurs de dossiers. + +## Chargeur de base de données de notion + +

Nœud de base de données Notion

+ +Le chargeur de base de données extrait le contenu des bases de données de notion, traitant chaque ligne comme un document distinct. + +### Caractéristiques + +* Chargez les lignes de la base de données comme documents +* Extraire les propriétés sous forme de métadonnées +* Soutenir les en-têtes de propriété +* Gérer le chargement simultané +* Traiter le contenu avec des séparateurs de texte +* Personnaliser l'extraction des métadonnées + +### Paramètres requis + +* ** Connectez les informations d'identification **: notion API +* ** ID de base de données **: L'identifiant unique de la base de données de notion + +## Chargeur de page de notion + +

Nœud de page Notion

+ +Le chargeur de page extrait le contenu des pages de notion, y compris toutes les pages enfants en tant que documents distincts. + +### Caractéristiques + +* Chargez le contenu de la page sous forme de documents +* Traiter les pages enfants récursivement +* Extraire les propriétés de la page +* Gérer la hiérarchie des pages +* Prise en charge du fractionnement du texte +* Personnaliser l'extraction des métadonnées + +### Paramètres requis + +* ** Connectez les informations d'identification **: notion API +* ** ID de page **: l'identifiant hexadécimal à 32 caractères de l'URL de la page + +## Chargeur de dossiers de notion + +

Nœud de dossier Notion

+ +Le chargeur de dossier traite le contenu de notion exporté et dézippé à partir d'un dossier local. + +### Caractéristiques + +* Traiter le contenu de la notion exportée +* Gérer plusieurs pages +* Prise en charge du système de fichiers local +* Extraire le contenu de la page +* Maintenir la structure des documents +* Prise en charge du fractionnement du texte +* Personnaliser l'extraction des métadonnées + +### Paramètres requis + +* ** Dossier de notion **: Chemin vers le dossier de notion exporté et dézippé + +## Caractéristiques communes + +Tous les chargeurs de notion Support: + +### Paramètres facultatifs + +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +### Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Authentification + +### Authentification de l'API (base de données et chargeurs de page) + +* Nécessite un jeton d'intégration de notion +* Limitation du taux d'API géré automatiquement +* Prise en charge de l'accès au niveau de l'espace de travail +* Gestion d'identification sécurisée + +### Accès local (chargeur de dossiers) + +* Aucune authentification requise +* Accès au système de fichiers direct +* Traiter le contenu hors ligne +* Gérer les données exportées + +## Structure de document + +Chaque document contient: + +* ** PageContent **: Contenu texte extrait +* ** Metadata **: + * Source: source d'origine (URL ou chemin de fichier) + * Titre: Page ou Titre de la base de données + * Propriétés: propriétés de notion + * Métadonnées personnalisées supplémentaires + +## Notes + +* Les chargeurs d'API nécessitent une configuration d'intégration de notion +* Le chargeur de dossier a besoin de contenu exporté +* La 
limitation du taux géré automatiquement +* Traitement économe en mémoire +* Gestion des erreurs pour les entrées non valides +* Prise en charge des grands ensembles de données +* Formats de sortie flexibles +* Personnalisation des métadonnées diff --git a/fr/integrations/langchain/document-loaders/oxylabs.md b/fr/integrations/langchain/document-loaders/oxylabs.md new file mode 100644 index 00000000..cfbffdbf --- /dev/null +++ b/fr/integrations/langchain/document-loaders/oxylabs.md @@ -0,0 +1,42 @@ +--- +description: Get data from any website with Oxylabs. +--- + +# Chargeurs de documents oxylabs + +OxyLabs est un service de grattage Web qui récupère les données Web publiques à grande échelle, avec des outils conçus pour naviguer dans les restrictions régionales. + +

oxylabs docuemnt chargeur nœud + + +### Caractéristiques +- Récupérer les données de Google, d'Amazon et de tout autre site Web +- Régler la géolocalisation +- Utiliser le rendu du navigateur +- Analyser les données +- Spécifiez les types d'agents utilisateur +- Traiter le contenu avec des séparateurs de texte + +### Paramètres requis +- ** Connectez les informations d'identification **: les informations d'identification de l'API OxyLabs +- ** Query **: Rechercher la requête ou l'URL +- ** Source **: l'une des sources disponibles: + - Universal - grattez n'importe quel site Web + - Recherche Google - StrAchez les résultats de la recherche Google + - Product Amazon - StrArez les informations sur les produits Amazon + - Recherche d'Amazon - StrAter les résultats de recherche Amazon + +### Paramètres facultatifs +- ** Geolocation **: Définit l'emplacement GEO du proxy pour récupérer les données. Voir[documentation](https://files.gitbook.com/v0/b/gitbook-x-prod.appspot.com/o/spaces%2FiwDdoZGfMbUe5cRL2417%2Fuploads%2FxoQb19qSyodB2D4no0DZ%2FList%20of%20supported%20geo_location%20values_sapi.json?alt=media&token=d2e2df7b-10ba-4399-a547-0c4a99e62293)pour plus de détails. +- ** Render **: Active le rendu JavaScript lorsqu'il est défini sur true. +- ** Parse **: Renvoie les données analysées lorsqu'elles sont définies sur true, tant qu'un analyseur dédié existe pour le type de page de l'URL soumis. +- ** Type d'agent utilisateur **: type de périphérique et navigateur. 
+ +### Sorties +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + + +## Structure de document +Chaque document contient: +- ** PageContent **: Contenu de la page extrait diff --git a/fr/integrations/langchain/document-loaders/pdf-file.md b/fr/integrations/langchain/document-loaders/pdf-file.md new file mode 100644 index 00000000..3707fb11 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/pdf-file.md @@ -0,0 +1,85 @@ +# Chargeur de documents PDF + +PDF (Format de document portable) est un format de fichier développé par Adobe pour présenter des documents de manière cohérente sur les plateformes logicielles. Ce module fournit des fonctionnalités pour charger et traiter les fichiers PDF à l'aide de pdf.js. + +Ce module fournit un chargeur de document PDF sophistiqué qui peut: +- Chargez des fichiers PDF uniques ou multiples +- Diviser les documents par page ou fichier +- Prise en charge des fichiers codés Base64 +- Gérer l'intégration du stockage des fichiers +- Traiter le contenu avec des séparateurs de texte +- Prise en charge des versions PDF héritées +- Personnaliser l'extraction des métadonnées + +## Entrées + +### Paramètres requis +- ** Fichier PDF **: le (s) fichier (s) PDF à traiter (extension .pdf) +- ** Utilisation **: Choisissez entre: + - Un document par page + - Un document par fichier + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Utilisez le héritage Build **: s'il faut utiliser le héritage pdf.js build +- ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## 
Caractéristiques +- Prise en charge des fichiers multiples +- Division au niveau de la page +- Prise en charge de la version héritée +- Extraction de texte +- Manipulation des métadonnées +- Gestion des erreurs +- Traitement économe en mémoire + +## Modes de traitement + +### En mode page +- Chaque page devient un document +- Conserve les numéros de page +- Métadonnées de page individuelles +- Accès de contenu granulaire + +### Par mode de fichier +- PDF entier en un seul document +- Contenu combiné +- Ensemble de métadonnées uniques +- Mémoire efficace + +## Structure de document +Chaque document contient: +- ** PageContent **: Contenu texte extrait +- ** Metadata **: + - Source: chemin d'origine du fichier + - PDF: métadonnées spécifiques au PDF + - Page: numéro de page (en mode par page) + - Métadonnées personnalisées supplémentaires + +## Manutention de fichiers + +### Fichiers locaux +- Chargement de fichier direct +- Base64 Contenu codé +- Prise en charge des fichiers multiples + +### Intégration de stockage +- Prise en charge du système de stockage de fichiers +- Stockage basé sur l'organisation +- Stockage basé sur Chatflow + +## Notes +- Utilise pdf.js pour l'extraction +- Prise en charge de la version héritée +- Traitement économe en mémoire +- Gestion des erreurs pour les fichiers non valides +- Prise en charge des grands PDF +- Formats de sortie flexibles +- Personnalisation des métadonnées +- Manipulation du codage de texte diff --git a/fr/integrations/langchain/document-loaders/plain-text.md b/fr/integrations/langchain/document-loaders/plain-text.md new file mode 100644 index 00000000..4c43031e --- /dev/null +++ b/fr/integrations/langchain/document-loaders/plain-text.md @@ -0,0 +1,95 @@ +# Texte brut + +

+ +Le texte brut est la forme la plus élémentaire de données de texte, ne contenant pas de formatage ou d'autres informations intégrées. Ce module fournit des fonctionnalités pour charger et traiter directement le contenu de texte brut. + +Ce module fournit un chargeur de document texte simple qui peut: + +* Chargez directement le contenu du texte +* Traiter le texte avec des séparateurs +* Ajouter des métadonnées personnalisées +* Gérer les caractères d'évasion +* Prise en charge du fractionnement du document +* Personnaliser l'extraction des métadonnées +* Gérer l'encodage du texte + +## Entrées + +### Paramètres requis + +* ** Texte **: le contenu en texte brut à traiter + +### Paramètres facultatifs + +* ** Splitter de texte **: un séparateur de texte pour traiter le contenu +* ** Metadata supplémentaires **: objet JSON avec métadonnées supplémentaires +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +* ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +* ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques + +* Entrée de texte direct +* Support de division de texte +* Manipulation des métadonnées +* Gestion des erreurs +* Traitement économe en mémoire +* Traitement de codage de caractère +* Formats de sortie flexibles + +## Traitement du texte + +### Mode direct + +* Création de documents uniques +* Conserve le texte original +* Manipulation de métadonnées de base +* Mémoire efficace + +### Mode partagé + +* Création de documents multiples +* Règles de division personnalisées +* Métadonnées individuelles +* Accès de contenu granulaire + +## Structure de document + +Chaque document contient: + +* ** PageContent **: Contenu du texte original ou divisé +* ** Metadata **: + * Métadonnées personnalisées à partir de l'entrée + * Métadonnées spécifiques divisées (lors de l'utilisation du séparateur) + * Propriétés de 
métadonnées supplémentaires + +## Traitement du contenu + +### Entrée de texte + +* Entrée directe de chaîne +* Support multi-lignes +* Support Unicode +* Échapper à la manipulation des personnages + +### Options de traitement + +* Division de texte +* Ajout de métadonnées +* Normalisation du caractère +* Manipulation des espaces + +## Notes + +* Simple et efficace +* Aucune gestion de fichier requise +* Traitement économe en mémoire +* Gestion des erreurs pour les entrées non valides +* Prise en charge des grands textes +* Formats de sortie flexibles +* Personnalisation des métadonnées +* Caractère Encoding Support diff --git a/fr/integrations/langchain/document-loaders/playwright-web-scraper.md b/fr/integrations/langchain/document-loaders/playwright-web-scraper.md new file mode 100644 index 00000000..0bdb9eda --- /dev/null +++ b/fr/integrations/langchain/document-loaders/playwright-web-scraper.md @@ -0,0 +1,65 @@ +# Scraper Web du dramaturge + +Le dramaturge est une bibliothèque puissante pour l'automatisation du navigateur qui peut contrôler le chrome, Firefox et WebKit avec une seule API. Ce module fournit des capacités de grattage Web avancées à l'aide de Playwright pour extraire le contenu des pages Web, y compris du contenu dynamique qui nécessite une exécution JavaScript. 
+ +Ce module fournit un grattoir Web sophistiqué qui peut: +- Chargez le contenu à partir de pages Web uniques ou multiples +- Gérer le contenu rendu javascript +- Soutenez diverses stratégies de chargement de pages +- Attendez que des éléments spécifiques se chargent +- Crawl liens relatifs des sites Web +- Processus des sitemaps XML + +## Entrées + +- ** URL **: L'URL de la page Web pour gratter +- ** Splitter de texte ** (facultatif): un séparateur de texte pour traiter le contenu extrait +- ** Obtenez la méthode des liens relatifs ** (facultatif): Choisissez entre: + - Crawl Web: Crawl Liens relatifs de l'URL HTML + - Gratter le plan du site XML: Racler les liens relatifs de l'URL du site XML +- ** Obtenez des liens relatifs Limite ** (Facultatif): Limite pour le nombre de liens relatifs à traiter (par défaut: 10, 0 pour tous les liens) +- ** Attendez jusqu'à ** (facultatif): Stratégie de chargement de la page: + - Charge: attendez que l'événement de chargement tire + - Contenu DOM Chargé: attendez l'événement DomContent Télélé + - Network Inactive: attendez qu'aucune connexion réseau pendant 500 ms + - Commit: attendez la réponse initiale du réseau et le chargement des documents +- ** Attendez que le sélecteur charge ** (facultatif): le sélecteur CSS attend avant de gratter +- ** Métadonnées supplémentaires ** (Facultatif): objet JSON avec des métadonnées supplémentaires à ajouter aux documents +- ** omettre les clés de métadonnées ** (facultative): liste de clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Prise en charge du moteur multi-navigateur (Chromium, Firefox, Webkit) +- Prise en charge de l'exécution JavaScript +- Stratégies de chargement de page configurables +- Capacités d'attente des éléments +- Fonctionnalité de rampe sur le Web +- Traitement du 
plan du site XML +- Opération de navigateur sans tête +- Configuration de bac à sable +- Gestion des erreurs pour les URL non valides +- Personnalisation des métadonnées + +## Notes +- S'exécute en mode sans tête par défaut +- Utilise le mode sans sandbox pour la compatibilité +- Les URL non valides lanceront une erreur +- La définition de la limite des liens à 0 récupérera tous les liens disponibles (peut prendre plus de temps) +- Prend en charge l'attente d'éléments DOM spécifiques avant l'extraction + +## Gratter une URL + +1. _ (Facultatif) _ connecter **[Text Splitter](../text-splitters/)**. +2. Entrée URL souhaitée à gratter. + +## Crawl et gratter plusieurs URL +Visite **[Web Crawl](../../use-cases/web-crawl.md)** Guide pour permettre le grattage de plusieurs pages. + +## Ressources + +* [LangChain JS Playwright](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/web_playwright) +* [Playwright](https://playwright.dev/) diff --git a/fr/integrations/langchain/document-loaders/puppeteer-web-scraper.md b/fr/integrations/langchain/document-loaders/puppeteer-web-scraper.md new file mode 100644 index 00000000..a9b4adfe --- /dev/null +++ b/fr/integrations/langchain/document-loaders/puppeteer-web-scraper.md @@ -0,0 +1,68 @@ +# Grattoir Web de marionnettiste + +Puppeteer est une bibliothèque Node.js qui fournit une API de haut niveau pour contrôler Chrome / Chromium sur le protocole Devtools. Ce module fournit des capacités de grattage Web avancées à l'aide de marionnetteer pour extraire le contenu à partir de pages Web, y compris du contenu dynamique qui nécessite une exécution JavaScript. 
+ +Ce module fournit un grattoir Web sophistiqué qui peut: +- Chargez le contenu à partir de pages Web uniques ou multiples +- Gérer le contenu rendu javascript +- Soutenez diverses stratégies de chargement de pages +- Attendez que des éléments spécifiques se chargent +- Crawl liens relatifs des sites Web +- Processus des sitemaps XML + +## Entrées + +- ** URL **: L'URL de la page Web pour gratter +- ** Splitter de texte ** (facultatif): un séparateur de texte pour traiter le contenu extrait +- ** Obtenez la méthode des liens relatifs ** (facultatif): Choisissez entre: + - Crawl Web: Crawl Liens relatifs de l'URL HTML + - Gratter le plan du site XML: Racler les liens relatifs de l'URL du site XML +- ** Obtenez des liens relatifs Limite ** (Facultatif): Limite pour le nombre de liens relatifs à traiter (par défaut: 10, 0 pour tous les liens) +- ** Attendez jusqu'à ** (facultatif): Stratégie de chargement de la page: + - Charge: lorsque le DOM du document HTML initial est chargé + - Contenu DOM Chargé: lorsque le DOM du document HTML complet est chargé + - Network Invalette 0: pas de connexions réseau pour 500 ms + - Network Idle 2: Pas plus de 2 connexions réseau pendant 500 ms +- ** Attendez que le sélecteur charge ** (facultatif): le sélecteur CSS attend avant de gratter +- ** Métadonnées supplémentaires ** (Facultatif): objet JSON avec des métadonnées supplémentaires à ajouter aux documents +- ** omettre les clés de métadonnées ** (facultative): liste de clés de métadonnées séparées par des virgules pour omettre + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Prise en charge de l'exécution JavaScript +- Stratégies de chargement de page configurables +- Capacités d'attente des éléments +- Fonctionnalité de rampe sur le Web +- Traitement du plan du site XML +- Opération de navigateur sans tête +- Configuration de bac à 
sable +- Gestion des erreurs pour les URL non valides +- Personnalisation des métadonnées + +## Notes +- S'exécute en mode sans tête par défaut +- Utilise le mode sans sandbox pour la compatibilité +- Les URL non valides lanceront une erreur +- La définition de la limite des liens à 0 récupérera tous les liens disponibles (peut prendre plus de temps) +- Prend en charge l'attente d'éléments DOM spécifiques avant l'extraction + +## Gratter une URL + +1. _ (Facultatif) _ connecter **[Text Splitter](../text-splitters/)**. +2. Entrée URL souhaitée à gratter. + +## Crawl et gratter plusieurs URL +Visite **[Web Crawl](../../use-cases/web-crawl.md)** Guide pour permettre le grattage de plusieurs pages. + +## Sortir + +Charge le contenu de l'URL en tant que document + +## Ressources + +* [LangChain JS Puppeteer](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/web_puppeteer) +* [Puppeteer](https://pptr.dev/) diff --git a/fr/integrations/langchain/document-loaders/s3-file-loader.md b/fr/integrations/langchain/document-loaders/s3-file-loader.md new file mode 100644 index 00000000..91f1f57a --- /dev/null +++ b/fr/integrations/langchain/document-loaders/s3-file-loader.md @@ -0,0 +1,94 @@ +# Chargeur de fichiers S3 + +Amazon S3 (Simple Storage Service) est un service de stockage d'objets offrant une évolutivité de pointe, une disponibilité des données, une sécurité et des performances. Ce module fournit des fonctionnalités complètes pour charger et traiter les fichiers stockés dans des seaux S3. 
+ +Ce module fournit un chargeur de document S3 sophistiqué qui peut: +- Chargez des fichiers à partir de seaux S3 à l'aide d'identification AWS +- Prise en charge de plusieurs formats de fichiers (PDF, DOCX, CSV, Excel, PowerPoint, Fichiers texte) +- Processus de fichiers à l'aide de chargeurs intégrés ou non structurés.io API +- Gérer le texte et les fichiers binaires +- Personnaliser l'extraction des métadonnées + +## Entrées + +### Paramètres requis +- ** seau **: le nom du seau S3 +- ** Clé d'objet **: l'identifiant unique de l'objet dans le seau S3 +- ** Région **: Région AWS où se trouve le seau (par défaut: US-East-1) + +### Options de traitement +- ** Méthode de traitement des fichiers **: Choisissez entre: + - Intégrés de chargeurs: utilisez des processeurs de format de fichiers natifs + - Non structuré: utilisez une API non structurée.io pour un traitement avancé +- ** Splitter de texte ** (facultatif): séparateur de texte pour le traitement intégré +- ** Métadonnées supplémentaires ** (Facultatif): objet JSON avec métadonnées supplémentaires +- ** omettre les clés de métadonnées ** (facultative): clés pour omettre des métadonnées + +### Options non structurées.io +- ** URL API non structurée **: point de terminaison pour API non structuré.io +- ** Clé API non structurée ** (facultatif): clé API pour l'authentification +- ** Stratégie **: Stratégie de traitement (Hi_res, Fast, OCR_ONLY, AUTO) +- ** Encodage **: Méthode de codage de texte (par défaut: UTF-8) +- ** Sauter les types de tables inférieurs **: Types de documents pour sauter l'extraction de la table + +## Sorties + +- ** Document **: tableau d'objets de document contenant des métadonnées et un conceptent +- ** Texte **: chaîne concaténée du conceptent de documents + +## Caractéristiques +- Intégration AWS S3 +- Prise en charge du format de fichier multiple +- Traitement intégré et non structuré.io +- Régions AWS configurables +- Manipulation flexible des métadonnées +- Traitement de fichiers 
binaires +- Gestion temporaire des fichiers +- Détection de type mime + +## Types de fichiers pris en charge +- Documents PDF +- Microsoft Word (DOCX) +- Microsoft Excel +- Microsoft PowerPoint +- Fichiers CSV +- Fichiers texte +- Et plus par non structuré.io + +## Notes +- Nécessite des informations d'identification AWS (facultative si vous utilisez des rôles IAM) +- Certains types de fichiers peuvent nécessiter des méthodes de traitement spécifiques +- L'API non structurée.io nécessite une configuration et des informations d'identification distinctes +- Les fichiers temporaires sont créés et gérés automatiquement +- Gestion des erreurs pour les types de fichiers non pris en charge + +## Configuration non structurée + +Vous pouvez soit utiliser l'API hébergé ou exécuter localement via Docker. + +* [Hosted API](https://unstructured-io.github.io/unstructured/api.html) +* Docker:`docker run -p 8000:8000 -d --rm --name unstructured-api quay.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0` + +## Configuration du chargeur de fichiers S3 + +1 \ \. Faites glisser et déposez le chargeur de fichiers S3 sur Canvas: + +
+ +2 \. Indemnité AWS: créez un nouvel diplôme pour votre compte AWS. Vous aurez besoin de l'accès et de la clé secrète. N'oubliez pas d'accorder la politique du seau S3 au compte associé. Vous pouvez vous référer au guide politique[here](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.Authorizing.IAM.S3CreatePolicy.html). + +
+ +3. Bodet: Connectez-vous à votre console AWS et accédez à S3. Obtenez le nom de votre seau: + +
+ +4. Clé: cliquez sur l'objet que vous souhaitez utiliser et obtenez le nom de la clé: + +
+ +5. URL de l'API non structurée: Selon la façon dont vous utilisez non structuré, que ce soit via une API ou Docker hébergée, modifiez le paramètre URL de l'API non structuré. Si vous utilisez une API hébergée, vous aurez également besoin de la touche API. +6. Vous pouvez ensuite commencer à discuter avec votre fichier depuis S3. Vous n'avez pas à spécifier le séparateur de texte pour la réduction du document car il est géré automatiquement par non-structuré. + +
+ diff --git a/fr/integrations/langchain/document-loaders/searchapi-for-web-search.md b/fr/integrations/langchain/document-loaders/searchapi-for-web-search.md new file mode 100644 index 00000000..ea8e09aa --- /dev/null +++ b/fr/integrations/langchain/document-loaders/searchapi-for-web-search.md @@ -0,0 +1,88 @@ +--- +description: Load data from real-time search results. +--- + +# Searchapi pour la recherche sur le Web + +

searchapi pour la recherche Web

+ +Le SeechAPI pour le chargeur de recherche Web donne accès aux résultats de recherche en temps réel à partir de plusieurs moteurs de recherche à l'aide du service SearchAPI. Ce chargeur vous permet de récupérer, de traiter et de structurer les résultats de recherche comme des documents qui peuvent être utilisés dans votre flux de travail. + +## Caractéristiques + +* Résultats de recherche en temps réel à partir de plusieurs moteurs de recherche +* Paramètres de recherche personnalisables +* Capacités de division de texte +* Manipulation flexible des métadonnées +* Formats de sortie multiples +* Authentification des clés de l'API + +## Entrées + +### Paramètres requis + +* ** Connectez les informations d'identification **: Recherche d'identification de la clé de l'API SearchAPI +* Au moins un de: + * ** requête **: chaîne de requête de recherche + * ** Paramètres personnalisés **: objet JSON avec paramètres de recherche + +### Paramètres facultatifs + +* ** Query **: la requête de recherche à exécuter (sinon en utilisant des paramètres personnalisés) +* ** Paramètres personnalisés **: objet JSON avec paramètres de recherche supplémentaires + * Prend en charge tous les paramètres de[SearchApi documentation](https://www.searchapi.io/docs/google) + * Peut remplacer les paramètres par défaut + * Permet des configurations spécifiques au moteur +* ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +* ** Métadonnées supplémentaires **: objet JSON avec des métadonnées supplémentaires à ajouter aux documents +* ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules à exclure + * Format:`key1, key2, key3.nestedKey1` + * Utiliser \ * pour supprimer toutes les métadonnées par défaut + +## Sorties + +* ** Document **: tableau d'objets de document contenant: + * métadonnées: métadonnées du résultat de la recherche + * contenu de la recherche: contenu de résultat de recherche +* ** Texte **: chaîne concaténée 
du contenu de tous les résultats de recherche + +## Structure de document + +Chaque document contient: + +* ** PageContent **: le contenu principal du résultat de la recherche +* ** Metadata **: + * Métadonnées de résultat de recherche par défaut + * Métadonnées personnalisées (si spécifiées) + * Métadonnées filtrées (basées sur les clés omises) + +## Manipulation des métadonnées + +Deux façons de personnaliser les métadonnées: + +1. ** métadonnées supplémentaires ** + * Ajouter de nouveaux champs de métadonnées via JSON + * Fusionné avec les métadonnées existantes + * Utile pour ajouter un suivi ou une catégorisation personnalisée +2. ** omettre les clés de métadonnées ** + * Retirer les champs de métadonnées indésirables + * Liste de clés séparée par des virgules pour exclure + * Prise en charge de la suppression des clés imbriqués + * Utiliser \ * pour supprimer toutes les métadonnées par défaut + +## Conseils d'utilisation + +* Fournir des requêtes de recherche spécifiques pour de meilleurs résultats +* Utilisez des paramètres personnalisés pour les configurations de recherche avancées +* Envisagez d'utiliser des séparateurs de texte pour les grands résultats de recherche +* Gérer les métadonnées pour conserver les informations pertinentes +* Manipuler les limites de taux grâce à l'espacement des requêtes approprié + +## Notes + +* Nécessite la clé de l'API Searchapi +* Respecte les limites de taux de l'API +* Prend en charge plusieurs moteurs de recherche +* Résultats de recherche en temps réel +* Traitement économe en mémoire +* Gestion des erreurs pour les demandes d'API diff --git a/fr/integrations/langchain/document-loaders/serpapi-for-web-search.md b/fr/integrations/langchain/document-loaders/serpapi-for-web-search.md new file mode 100644 index 00000000..20d3118a --- /dev/null +++ b/fr/integrations/langchain/document-loaders/serpapi-for-web-search.md @@ -0,0 +1,89 @@ +--- +description: Load and process data from web search results. 
+--- + +# Serpapi pour la recherche Web + +

serpapi pour le nœud de recherche Web

+ +Le serpapi pour le chargeur de recherche Web vous permet de récupérer et de traiter les résultats de recherche Web à l'aide du service SERPAPI. Ce chargeur transforme les résultats de recherche en documents structurés qui peuvent être facilement intégrés dans votre flux de travail, ce qui le rend idéal pour les applications nécessitant des données de recherche Web en temps réel. + +## Caractéristiques +- Résultats de recherche Web en temps réel +- Capacités de division de texte +- Manipulation des métadonnées personnalisables +- Formats de sortie multiples +- Authentification des clés de l'API +- Traitement de documents efficace + +## Entrées + +### Paramètres requis +- ** Connectez les informations d'identification **: les informations d'identification de la clé de l'API serpapi +- ** requête **: la requête de recherche à exécuter + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Métadonnées supplémentaires **: objet JSON avec des métadonnées supplémentaires à ajouter aux documents +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules à exclure + - Format:`key1, key2, key3.nestedKey1` + - Utiliser * pour supprimer toutes les métadonnées par défaut, sauf les métadonnées personnalisées + +## Sorties + +- ** Document **: tableau d'objets de document contenant: + - métadonnées: métadonnées du résultat de la recherche + - contenu de la recherche: contenu de résultat de recherche +- ** Texte **: chaîne concaténée du contenu de tous les résultats de recherche + +## Structure de document +Chaque document contient: +- ** PageContent **: le contenu principal du résultat de la recherche +- ** Metadata **: + - Métadonnées de résultat de recherche par défaut + - Métadonnées personnalisées (si spécifiées) + - Métadonnées filtrées (basées sur les clés omises) + +## Manipulation des métadonnées +Deux façons de personnaliser les métadonnées: +1. 
** métadonnées supplémentaires ** + - Ajouter de nouveaux champs de métadonnées via JSON + - Fusionné avec les métadonnées existantes + - Utile pour ajouter un suivi ou une catégorisation personnalisée + +2. ** omettre les clés de métadonnées ** + - Retirer les champs de métadonnées indésirables + - Liste de clés séparée par des virgules pour exclure + - Prise en charge de la suppression des clés imbriqués + - Utiliser * pour supprimer toutes les métadonnées par défaut + +## Conseils d'utilisation +- Fournir des requêtes de recherche spécifiques pour de meilleurs résultats +- Utilisez des séparateurs de texte pour les grands résultats de recherche +- Personnalisez les métadonnées pour répondre à vos besoins +- Considérez les limites de taux lors de la fabrication de requêtes multiples +- Gérer les résultats de recherche de manière appropriée en fonction de la taille + +## Notes +- Nécessite la clé de l'API SERPAPI +- Respecte les limites de taux de l'API +- Résultats de recherche en temps réel +- Traitement économe en mémoire +- Gestion des erreurs pour les demandes d'API +- Prend en charge les formats de sortie de document et de texte + +## Exemple d'utilisation +```typescript +// Example search query +query: "artificial intelligence latest developments" + +// Example additional metadata +metadata: { + "source": "serpapi", + "category": "tech", + "timestamp": "2024-03-21" +} + +// Example metadata keys to omit +omitMetadataKeys: "snippet, position, link" +``` diff --git a/fr/integrations/langchain/document-loaders/spider-web-scraper-crawler.md b/fr/integrations/langchain/document-loaders/spider-web-scraper-crawler.md new file mode 100644 index 00000000..f960e98f --- /dev/null +++ b/fr/integrations/langchain/document-loaders/spider-web-scraper-crawler.md @@ -0,0 +1,100 @@ +--- +description: Scrape & Crawl the web with Spider - the fastest open source web scraper & crawler. +--- + +# Spider web grattoir / Crawler + + Spider Node

Nœud Spider Web Scraper / Crawler

+ +[Spider](https://spider.cloud/?ref=flowise)est le grattoir et le robot d'open source le plus rapide qui renvoie les données pratiquées par LLM. Pour commencer à utiliser ce nœud, vous avez besoin d'une clé API à partir de[Spider.cloud](https://spider.cloud/?ref=flowise). + +## Commencer + +1. Aller au[Spider.cloud](https://spider.cloud/?ref=flowise)Site Web et inscrivez-vous à un compte gratuit. +2. Alors allez au[API Keys](https://spider.cloud/api-keys)et créer une nouvelle clé API. +3. Copiez la touche API et collez-le dans le champ "Contalin" dans le nœud Spider. + +## Caractéristiques +- Deux modes d'opération: gratter et ramper +- Capacités de division de texte +- Manipulation des métadonnées personnalisables +- Configuration des paramètres flexibles +- Formats de sortie multiples +- Contenu formulé Markdown +- Manipulation des limites de taux + +## Entrées + +### Paramètres requis +- ** Mode **: Choisissez entre: + - ** Scrape **: Extraire les données d'une seule page + - ** Crawl **: Extraire les données de plusieurs pages dans le même domaine +- ** URL de la page Web **: L'URL cible pour gratter ou ramper (par exemple, https://spider.cloud) +- ** Prédiction **: clé API Spider + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Limite **: Nombre maximum de pages à ramper (par défaut: 25, uniquement applicable en mode crawl) +- ** Métadonnées supplémentaires **: objet JSON avec des métadonnées supplémentaires à ajouter aux documents +- ** Paramètres supplémentaires **: objet JSON avec[Spider API parameters](https://spider.cloud/docs/api) + - Exemple:`{ "anti_bot": true }` + - Note:`return_format`est toujours réglé sur "Markdown" +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules à exclure + - Format:`key1, key2, key3.nestedKey1` + - Utiliser * pour supprimer toutes les métadonnées par défaut + +## Sorties + +- ** Document **: tableau d'objets 
de document contenant: + - métadonnées: métadonnées de page et champs personnalisés + - Concontent: Contenu extrait au format Markdown +- ** Texte **: chaîne concaténée de tout contenu extrait + +## Structure de document +Chaque document contient: +- ** PageContent **: Le contenu principal de la page Web au format Markdown +- ** Metadata **: + - Source: L'URL de la page + - Métadonnées personnalisées supplémentaires (si spécifiées) + - Métadonnées filtrées (basées sur les clés omises) + +## Exemples d'utilisation + +### Grattage de base +```json +{ + "mode": "scrape", + "url": "https://example.com", + "limit": 1 +} +``` + +### Rampant avancé +```json +{ + "mode": "crawl", + "url": "https://example.com", + "limit": 25, + "additional_metadata": { + "category": "blog", + "source_type": "web" + }, + "params": { + "anti_bot": true, + "wait_for": ".content-loaded" + } +} +``` + +## Exemple + + Exemple sur Spider Node

Exemple sur Spider Node

+ +## Notes +- Le robotage respecte la limite spécifiée pour les opérations de crawl +- Tout le contenu est renvoyé au format Markdown +- La gestion des erreurs est intégrée à la fois pour les opérations de grattage et de rampe +- Les configurations JSON non valides sont traitées gracieusement +- Traitement économe en mémoire des grands sites Web +- Prend en charge l'extraction à une seule page et à plusieurs pages +- Manipulation automatique des métadonnées et filtrage diff --git a/fr/integrations/langchain/document-loaders/text-file.md b/fr/integrations/langchain/document-loaders/text-file.md new file mode 100644 index 00000000..9ab051fe --- /dev/null +++ b/fr/integrations/langchain/document-loaders/text-file.md @@ -0,0 +1,121 @@ +--- +description: Load data from text files. +--- + +# Fichier texte + +

Node de fichier texte

+ +Le chargeur de fichiers texte vous permet de charger et de traiter le contenu à partir de divers formats de fichiers textuels. Il prend en charge plusieurs types de fichiers et fournit des options flexibles pour la division de texte et la gestion des métadonnées. + +## Caractéristiques +- Prise en charge de plusieurs formats de fichiers textuels +- Capacité de chargement de fichiers multiples +- Support de division de texte +- Manipulation des métadonnées personnalisables +- Prise en charge de l'intégration du stockage +- Gestion des fichiers Base64 +- Formats de sortie multiples + +## Types de fichiers pris en charge +Le chargeur prend en charge une large gamme de formats de fichiers textuels: +- Fichiers texte (.txt) +- Fichiers Web (.html, .aspx, .asp, .css) +- Langages de programmation: + - C / c ++ (.cpp, .c, .h) + - C # (.cs) + - Aller (.go) + - Java (.java) + - JavaScript / TypeScript (.js, .ts) + - Php (.php) + - Python (.py, .python) + - Ruby (.rb, .Ruby) + - Rust (.RS) + - Scala (.sc, .scala) + - Solidité (.sol) + - Swift (.swift) + - Visual Basic (.vb) +- Marquage / style: + - CSS / Moins / SCSS (.css, .less, .scss) + - Markdown (.md, .markdown) + - XML (.xml) + - Latex (.tex, .ltx) +- Autre: + - Tampons de protocole (.proto) + - SQL (.SQL) + - RST (.RST) + +## Entrées + +### Paramètres requis +- ** Fichier TXT **: un ou plusieurs fichiers texte à traiter + - Accepte les fichiers du téléchargement local ou du stockage + - Prend en charge la sélection de fichiers multiples + +### Paramètres facultatifs +- ** Splitter du texte **: un séparateur de texte pour traiter le contenu extrait +- ** Métadonnées supplémentaires **: objet JSON avec des métadonnées supplémentaires à ajouter aux documents +- ** omettre les clés de métadonnées **: Liste des clés de métadonnées séparées par des virgules à exclure + - Format:`key1, key2, key3.nestedKey1` + - Utiliser * pour supprimer toutes les métadonnées par défaut + +## Sorties + +- ** Document **: tableau d'objets 
de document contenant: + - métadonnées: fichiers de métadonnées et champs personnalisés + - contenu de contenu: contenu texte extrait +- ** Texte **: chaîne concaténée de tout contenu extrait + +## Structure de document +Chaque document contient: +- ** PageContent **: le contenu principal du fichier texte +- ** Metadata **: + - Métadonnées de fichier par défaut + - Métadonnées personnalisées supplémentaires (si spécifiées) + - Métadonnées filtrées (basées sur les clés omises) + +## Exemples d'utilisation + +### Traitement de fichiers unique +```json +{ + "txtFile": "example.txt", + "metadata": { + "source": "local", + "category": "documentation" + } +} +``` + +### Traitement de fichiers multiples +```json +{ + "txtFile": ["doc1.txt", "doc2.md", "code.py"], + "metadata": { + "batch": "docs-2024", + "processor": "text-loader" + }, + "omitMetadataKeys": "source, timestamp" +} +``` + +## Intégration de stockage +Le chargeur prend en charge deux modes de source de fichiers: +1. ** Téléchargement direct **: fichiers téléchargés directement via l'interface +2. ** Intégration de stockage **: fichiers accessibles via le système de stockage + - Format:`FILE-STORAGE::filename.txt` + - Prend en charge l'organisation et le stockage spécifique à ChatFlow + +## Notes +- Gère le traitement de fichiers unique et multiple +- Prend en charge le contenu de fichier codé Base64 +- Gère automatiquement différents encodages de fichiers +- Traitement économe en mémoire des fichiers volumineux +- Conserve les métadonnées de fichier en cas de besoin +- Prend en charge le fractionnement du texte pour les grands documents +- Gère les caractères d'échappement dans le texte de sortie +- S'intègre au stockage spécifique à l'organisation + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. 
+{% EndHint%} diff --git a/fr/integrations/langchain/document-loaders/unstructured-file-loader.md b/fr/integrations/langchain/document-loaders/unstructured-file-loader.md new file mode 100644 index 00000000..905ed965 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/unstructured-file-loader.md @@ -0,0 +1,128 @@ +--- +description: Use Unstructured.io to load data from a file path. +--- + +# Chargeur de fichiers non structuré + +

Node de chargeur de fichiers non structuré

+ +Le chargeur de fichiers non structuré utilise[Unstructured.io](https://unstructured.io)pour extraire et traiter le contenu à partir de divers formats de fichiers. Il fournit des capacités d'analyse de document avancées avec des options configurables pour l'OCR, la chasse et l'extraction des métadonnées. + +## Caractéristiques +- Analyse avancée de documents +- Prise en charge de l'OCR avec plusieurs options de langue +- Stratégies de section flexibles +- Inférence de la structure du tableau +- Coordonnée +- Gestion de la pause de la page +- Traitement de la balise XML +- Sélection de modèle personnalisable +- Extraction de métadonnées + +## Configuration + +### Configuration de l'API +- URL de l'API par défaut:`https://api.unstructuredapp.io/general/v0/general` +- Nécessite une clé API de non structurée.io +- Peut être configuré via des variables d'environnement: + - `UNSTRUCTURED_API_URL` + - `UNSTRUCTURED_API_KEY` + +### Stratégies de traitement +- ** Stratégie **: la valeur par défaut est "Hi_res" + - Les options incluent diverses stratégies de traitement pour différents types de documents +- ** Stratégie de chasse **: + - Aucun (par défaut) + - by_title (texte de morceaux basé sur des titres) + +## Paramètres + +### Paramètres requis +- ** Fichier **: le document à traiter +- ** Clé API **: clé API non structurée.io (si elle n'est pas définie via l'environnement) + +### Paramètres facultatifs + +#### Options OCR +- ** Langues OCR **: tableau de langues pour le traitement OCR +- ** Encodage **: Spécifiez le codage du document + +#### Options de traitement +- ** Coordonnées **: Extraire les coordonnées des éléments (true / false) +- ** Structure de la table PDF **: Structure de table inférieure dans les PDF (vrai / faux) +- ** Tags XML **: Gardez les balises XML en sortie (true / false) +- ** Sauter les types de tables **: tableau des types de table pour sauter l'inférence +- ** modèle haute résolution **: spécifiez le nom du modèle haute résolution +- ** 
Inclure les pauses de page **: Inclure des informations de pause de page (vrai / false) + +#### Options de section texte +- ** Sections de plusieurs pages **: gérer les sections sur les pages (vrai / false) +- ** Combinez sous N Chars **: Combinez des éléments sous le nombre de caractères spécifié +- ** Nouveau après n Chars **: Créez un nouvel élément après le nombre spécifié de caractères +- ** Caractères max **: caractères maximum par élément + +## Structure de sortie + +### Format de document +Chaque élément traité devient un document avec: +- ** PageContent **: Contenu texte extrait +- ** Metadata **: + - Catégorie: Type d'élément + - Métadonnées supplémentaires du traitement + +### Types d'éléments +Le chargeur peut identifier divers types d'éléments: +- Blocs de texte +- Tables +- Listes +- Têtes +- Footters +- Breaks de page (si activé) +- Autres éléments structurels + +## Exemples d'utilisation + +### Configuration de base +```typescript +{ + "apiKey": "your-api-key", + "strategy": "hi_res", + "ocrLanguages": ["eng"] +} +``` + +### Traitement avancé +```typescript +{ + "apiKey": "your-api-key", + "strategy": "hi_res", + "coordinates": true, + "pdfInferTableStructure": true, + "chunkingStrategy": "by_title", + "multiPageSections": true, + "combineUnderNChars": 100, + "maxCharacters": 4000 +} +``` + +## Notes +- Les appels API sont effectués pour chaque demande de traitement de fichier +- La réponse comprend des éléments structurés avec du texte et des métadonnées +- Les éléments sont filtrés pour garantir un contenu texte valide +- Prend en charge le traitement basé sur les tampons +- Gestion des erreurs pour les réponses API +- Catégorisation automatique des métadonnées +- Traitement économe en mémoire + +## Meilleures pratiques +1. Définissez les paramètres de section appropriés pour votre cas d'utilisation +2. Considérez les paramètres de langue OCR pour les documents non anglais +3. 
Activer l'inférence de la structure du tableau pour les documents avec des tables +4. Utiliser les coordonnées lorsque les informations spatiales sont importantes +5. Configurer les limites de caractères en fonction de vos besoins de traitement en aval +6. Surveiller l'utilisation et les temps de réponse de l'API +7. Gérer les erreurs d'API potentielles dans votre flux de travail + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/document-loaders/unstructured-folder-loader.md b/fr/integrations/langchain/document-loaders/unstructured-folder-loader.md new file mode 100644 index 00000000..01f2cc99 --- /dev/null +++ b/fr/integrations/langchain/document-loaders/unstructured-folder-loader.md @@ -0,0 +1,155 @@ +--- +description: >- + Use Unstructured.io to load data from a folder. Note: Currently doesn't + support .png and .heic until unstructured is updated. +--- + +# Chargeur de dossiers non structurés + +

Node de chargeur de dossier non structuré

non structuré + +Le chargeur de dossier non structuré utilise[Unstructured.io](https://unstructured.io)Pour charger et traiter plusieurs documents à partir d'un dossier. Il fournit des capacités d'analyse de document avancées avec des options de configuration étendues pour l'OCR, le groupe et l'extraction des métadonnées. + +{% hint style = "avertissement"%} +Actuellement, ne prend pas en charge les fichiers .png et .heic jusqu'à la mise à jour non structurée. +{% EndHint%} + +## Caractéristiques +- Traitement par lots de plusieurs documents +- Plusieurs stratégies de traitement +- Support OCR avec plus de 15 langues +- Stratégies de section flexibles +- Inférence de la structure du tableau +- Options de traitement XML +- Gestion de la pause de la page +- Coordonnée +- Personnalisation des métadonnées + +## Configuration + +### Configuration de l'API +- URL de l'API par défaut:`http://localhost:8000/general/v0/general` +- Peut être configuré via une variable d'environnement:`UNSTRUCTURED_API_URL` +- Authentification des clés API facultative + +## Paramètres + +### Paramètres requis +- ** Path de dossier **: Chemin vers le dossier contenant des documents à traiter + +### Paramètres facultatifs + +#### Configuration de base +- ** URL API non structurée **: point de terminaison de l'API (par défaut: http: // localhost: 8000 / général / v0 / général) +- ** Stratégie **: stratégie de traitement (par défaut: auto) + - HI_RES: traitement haute résolution + - rapide: traitement rapide + - OCR_ONLY: Traitement axé sur l'OCR + - Auto: sélection automatique +- ** Encodage **: Encodage du document (par défaut: UTF-8) + +#### Options OCR +- ** Langues OCR **: Support linguistique multiple, y compris: + - Anglais (Eng) + - Espagnol (spa) + - Mandarin Chinois (CMN) + - Hindi (hin) + - Arabe (ARA) + - Portugais (POR) + - Bengali (Ben) + - Russe (RUS) + - Japonais (JPN) + - Et plus ... 
+ +#### Options de traitement +- ** Les types de tables de saut de déduction **: Types de fichiers pour sauter l'extraction de la table (par défaut: ["PDF", "JPG", "PNG"]) +- ** Nom du modèle HI-RES **: Sélection du modèle pour la stratégie HI_RES (par défaut: Detectron2_onnx) + - Chipper: modèle VDU interne de non structuré + - Detectron2_onnx: la détection rapide d'objets de Facebook AI + - Yolox: détecteur en temps réel à un étage + - yolox_quantized: version optimisée yolox +- ** Coordonnées **: Extraire les coordonnées des éléments (par défaut: false) +- ** Inclure les pauses de page **: Inclure des éléments de pause de page +- ** Tags de conservation XML **: préserver les balises XML +- ** Sections de plusieurs pages **: gérer les sections de plusieurs pages + +#### Options de section texte +- ** Stratégie de section **: Méthode de bunking texte (par défaut: by_title) + - Aucun: pas de bond + - by_title: Chunk by Document titres +- ** Mélanger sous N Chars **: Taille minimale de morceaux +- ** NOUVEAU APRÈS N Chars **: Taille de morceau maximum doux +- ** Caractères max **: Taille du morceau maximum dur (par défaut: 500) + +#### Options de métadonnées +- ** Clé ID source **: clé pour l'identification de la source du document (par défaut: source) +- ** Metadata supplémentaires **: métadonnées personnalisées en tant que JSON +- ** omettre les métadonnées clés **: clés pour exclure des métadonnées + +## Types de fichiers pris en charge +- Documents: .doc, .docx, .odt, .ppt, .pptx, .pdf +- Feuilles de calcul: .xls, .xlsx +- Texte: .txt, .Text, .md, .rtf +- Web: .html, .htm +- Courriel: .eml, .msg +- Images: .jpg, .jpeg (Remarque: .png et .heic actuellement non pris en charge) + +## Structure de sortie + +### Format de document +Chaque document traité comprend: +- ** PageContent **: Contenu texte extrait +- ** Metadata **: + - Source: Document Source Identifier + - Métadonnées supplémentaires du traitement + - Métadonnées personnalisées (si spécifiées) + +## 
Exemples d'utilisation + +### Configuration de base +```json +{ + "folderPath": "/path/to/documents", + "strategy": "auto", + "encoding": "utf-8" +} +``` + +### Traitement avancé +```json +{ + "folderPath": "/path/to/documents", + "strategy": "hi_res", + "hiResModelName": "detectron2_onnx", + "ocrLanguages": ["eng", "spa", "fra"], + "chunkingStrategy": "by_title", + "maxCharacters": 500, + "coordinates": true, + "metadata": { + "source": "company_docs", + "department": "legal" + } +} +``` + +## Meilleures pratiques +1. Choisissez une stratégie appropriée basée sur la qualité des documents et les besoins de traitement +2. Configurer les langages OCR en fonction du contenu du document +3. Ajustez les paramètres de segment pour la segmentation optimale de texte +4. Utilisez un modèle haute résolution approprié pour votre cas d'utilisation +5. Considérez l'utilisation de la mémoire lors du traitement de grands dossiers +6. Surveiller l'utilisation et les temps de réponse de l'API +7. Gérer les erreurs d'API potentielles dans votre flux de travail + +## Notes +- Traiter plusieurs documents en lot +- Prend en charge divers formats de fichiers +- Traitement économe en mémoire +- Gestion automatique des métadonnées +- Formats de sortie flexibles +- Gestion des erreurs pour les réponses API +- Options de traitement configurables + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/README.md b/fr/integrations/langchain/embeddings/README.md new file mode 100644 index 00000000..f01d7af2 --- /dev/null +++ b/fr/integrations/langchain/embeddings/README.md @@ -0,0 +1,37 @@ +--- +description: LangChain Embedding Nodes +--- + +# Incorporer + +*** + +Une intégration est un vecteur (liste) de numéros de points flottants. 
La distance entre deux vecteurs mesure leur relation. De petites distances suggèrent une forte parenté et de grandes distances suggèrent une faible parenté. + +Les intégres peuvent être utilisés pour créer une représentation numérique des données textuelles. Cette représentation numérique est utile car elle peut être utilisée pour trouver des documents similaires. + +Ils sont couramment utilisés pour: + +* Recherche (où les résultats sont classés par pertinence pour une chaîne de requête) +* Clustering (où les chaînes de texte sont regroupées par similitude) +* Recommandations (où les éléments avec des chaînes de texte connexes sont recommandés) +* Détection d'anomalies (où les valeurs aberrantes avec peu de parenté sont identifiées) +* Mesure de la diversité (où les distributions de similitude sont analysées) +* Classification (où les chaînes de texte sont classées par leur étiquette la plus similaire) + +### Nœuds d'intégration: + +* [AWS Bedrock Embeddings](aws-bedrock-embeddings.md) +* [Azure OpenAI Embeddings](azure-openai-embeddings.md) +* [Cohere Embeddings](cohere-embeddings.md) +* [Google GenerativeAI Embeddings](googlegenerativeai-embeddings.md) +* [Google PaLM Embeddings](broken-reference) +* [Google VertexAI Embeddings](googlevertexai-embeddings.md) +* [HuggingFace Inference Embeddings](huggingface-inference-embeddings.md) +* [LocalAI Embeddings](localai-embeddings.md) +* [MistralAI Embeddings](mistralai-embeddings.md) +* [Ollama Embeddings](ollama-embeddings.md) +* [OpenAI Embeddings](openai-embeddings.md) +* [OpenAI Embeddings Custom](openai-embeddings-custom.md) +* [TogetherAI Embedding](togetherai-embedding.md) +* [VoyageAI Embeddings](voyageai-embeddings.md) diff --git a/fr/integrations/langchain/embeddings/aws-bedrock-embeddings.md b/fr/integrations/langchain/embeddings/aws-bedrock-embeddings.md new file mode 100644 index 00000000..29cba296 --- /dev/null +++ b/fr/integrations/langchain/embeddings/aws-bedrock-embeddings.md @@ -0,0 +1,11 @@ +--- 
+description: AWSBedrock embedding models to generate embeddings for a given text. +--- + +# AWS Bedrock Embeddings + +
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/azure-openai-embeddings.md b/fr/integrations/langchain/embeddings/azure-openai-embeddings.md new file mode 100644 index 00000000..37b1956b --- /dev/null +++ b/fr/integrations/langchain/embeddings/azure-openai-embeddings.md @@ -0,0 +1,59 @@ +# Azure Openai Embeddings + +## Condition préalable + +1. [Log in](https://portal.azure.com/)ou[sign up](https://azure.microsoft.com/en-us/free/)à Azure +2. [Create](https://portal.azure.com/#create/Microsoft.CognitiveServicesOpenAI)Votre Azure Openai et attendez l'approbation d'environ 10 jours ouvrables +3. Votre touche API sera disponible sur ** Azure Openai **> Cliquez sur ** Nom \ _Azure \ _Openai **> Cliquez sur ** Cliquez ici pour gérer les touches ** + +
+ +## Installation + +### Azure Openai Embeddings + +1. Cliquez sur ** Accédez à Azure OpenAai Studio ** + +
+ +2. Cliquez sur ** Déploiements ** + +
+ +3. Cliquez sur ** Créer un nouveau déploiement ** + +
+ +4. Sélectionnez comme indiqué ci-dessous et cliquez sur ** Créer ** + +
+ +5. Créé avec succès ** Azure Openai Embeddings ** + +* Nom du déploiement:`text-embedding-ada-002` +* Nom de l'instance:`top right conner` + +
+ +### Couler + +1. ** Embeddings **> glisser ** Azure Openai Embeddings ** Node + +
+ +2. ** Connectez les informations d'identification **> Cliquez sur ** Créer un nouveau ** + +
+ +3. Copier et coller chaque détail (clé API, instance et nom de déploiement,[API Version](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)) dans ** azure openai embeddings ** titres d'identification + +
+ +4. Tour[🎉](https://emojipedia.org/party-popper/), vous avez créé ** Azure Openai Embeddings Node ** en Flowise + +
+ +## Ressources + +* [LangChain JS Azure OpenAI Embeddings](https://js.langchain.com/docs/modules/data\_connection/text\_embedding/integrations/azure\_openai) +* [Azure OpenAI Service REST API reference](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference) diff --git a/fr/integrations/langchain/embeddings/cohere-embeddings.md b/fr/integrations/langchain/embeddings/cohere-embeddings.md new file mode 100644 index 00000000..7925fc55 --- /dev/null +++ b/fr/integrations/langchain/embeddings/cohere-embeddings.md @@ -0,0 +1,11 @@ +--- +description: Cohere API to generate embeddings for a given text +--- + +# COHERE ENGRESS + +

cohere embeddings node

Google GenerativeAI Embeddings nœud

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/googlevertexai-embeddings.md b/fr/integrations/langchain/embeddings/googlevertexai-embeddings.md new file mode 100644 index 00000000..186f72c3 --- /dev/null +++ b/fr/integrations/langchain/embeddings/googlevertexai-embeddings.md @@ -0,0 +1,11 @@ +--- +description: Google vertexAI API to generate embeddings for a given text. +--- + +# Google Vertexai Embeddings + +

google vertexai EmbedDing + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/huggingface-inference-embeddings.md b/fr/integrations/langchain/embeddings/huggingface-inference-embeddings.md new file mode 100644 index 00000000..bfe634d9 --- /dev/null +++ b/fr/integrations/langchain/embeddings/huggingface-inference-embeddings.md @@ -0,0 +1,11 @@ +--- +description: HuggingFace Inference API to generate embeddings for a given text. +--- + +# Embrassant les incorporations d'inférence + +

HuggingFace Inference Embeddings Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/localai-embeddings.md b/fr/integrations/langchain/embeddings/localai-embeddings.md new file mode 100644 index 00000000..a603e22d --- /dev/null +++ b/fr/integrations/langchain/embeddings/localai-embeddings.md @@ -0,0 +1,46 @@ +# Incorporation locale + +## Configuration locale + +[**LocalAI** ](https://github.com/go-skynet/LocalAI)est une API de repos de remplacement qui est compatible avec les spécifications de l'API OpenAI pour l'inférence locale. Il vous permet d'exécuter des LLM (et pas seulement) localement ou sur site avec le matériel de qualité grand public, prenant en charge plusieurs familles de modèles compatibles avec le format GGML. + +Pour utiliser les incorporations locales dans Flowise, suivez les étapes ci-dessous: + +1. ```bash + git clone https://github.com/go-skynet/LocalAI + ``` +2.
   cd LocalAI
+  
+3. Localai fournit un[API endpoint](https://localai.io/api-endpoints/index.html#applying-a-model---modelsapply)Pour télécharger / installer le modèle. Dans cet exemple, nous allons utiliser le modèle Bert Embeddings: + +
+ +4. Dans le`/models`dossier, vous devriez pouvoir y voir le modèle téléchargé: + +
+ +5. Vous pouvez maintenant tester les intégres: + +```bash +curl http://localhost:8080/v1/embeddings -H "Content-Type: application/json" -d '{ + "input": "Test", + "model": "text-embedding-ada-002" + }' +``` + +6. La réponse devrait ressembler: + +
+ +## Configuration de flux + +Faites glisser et déposez un nouveau composant localembeddings sur toile: + +
+ +Remplissez les champs: + +* ** Path de base **: L'URL de base de localai comme[http://localhost:8080/v1](http://localhost:8080/v1) +* ** Nom du modèle **: Le modèle que vous souhaitez utiliser. Notez que ce doit être à l'intérieur`/models`dossier du répertoire localai. Par exemple:`text-embedding-ada-002` + +C'est ça! Pour plus d'informations, reportez-vous à Localai[docs](https://localai.io/models/index.html#embeddings-bert). diff --git a/fr/integrations/langchain/embeddings/mistralai-embeddings.md b/fr/integrations/langchain/embeddings/mistralai-embeddings.md new file mode 100644 index 00000000..982534a3 --- /dev/null +++ b/fr/integrations/langchain/embeddings/mistralai-embeddings.md @@ -0,0 +1,11 @@ +--- +description: MistralAI API to generate embeddings for a given text. +--- + +# Mistralai Embeddings + +

Mistralai Embedding + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/ollama-embeddings.md b/fr/integrations/langchain/embeddings/ollama-embeddings.md new file mode 100644 index 00000000..418c1dae --- /dev/null +++ b/fr/integrations/langchain/embeddings/ollama-embeddings.md @@ -0,0 +1,11 @@ +--- +description: Generate embeddings for a given text using open source model on Ollama. +--- + +# Olllama Embeddings + +

ollama embeddings node

openai Embedding + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/openai-embeddings.md b/fr/integrations/langchain/embeddings/openai-embeddings.md new file mode 100644 index 00000000..79f99faa --- /dev/null +++ b/fr/integrations/langchain/embeddings/openai-embeddings.md @@ -0,0 +1,11 @@ +--- +description: OpenAI API to generate embeddings for a given text. +--- + +# Openai Embeddings + +

openai EmbedDing + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/togetherai-embedding.md b/fr/integrations/langchain/embeddings/togetherai-embedding.md new file mode 100644 index 00000000..81974e21 --- /dev/null +++ b/fr/integrations/langchain/embeddings/togetherai-embedding.md @@ -0,0 +1,11 @@ +--- +description: TogetherAI Embedding models to generate embeddings for a given text. +--- + +# Ensemble de l'intégration + +

Ensemblea Eembedding Node + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/embeddings/voyageai-embeddings.md b/fr/integrations/langchain/embeddings/voyageai-embeddings.md new file mode 100644 index 00000000..07989837 --- /dev/null +++ b/fr/integrations/langchain/embeddings/voyageai-embeddings.md @@ -0,0 +1,11 @@ +--- +description: Voyage AI API to generate embeddings for a given text. +--- + +# Voyageai intégrés + +

Voyageae embourdDing + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/README.md b/fr/integrations/langchain/llms/README.md new file mode 100644 index 00000000..d42ce532 --- /dev/null +++ b/fr/integrations/langchain/llms/README.md @@ -0,0 +1,22 @@ +--- +description: LangChain LLM Nodes +--- + +# LLMS + +*** + +Un modèle grand langage, LLM pour faire court, est un système d'IA formé sur des quantités massives de données texte. Cela leur permet de communiquer et de générer du texte humain en réponse à un large éventail d'invites et de questions. Essentiellement, ils peuvent comprendre et répondre à un langage complexe. + +### NODES LLM: + +* [AWS Bedrock](aws-bedrock.md) +* [Azure OpenAI](azure-openai.md) +* [NIBittensorLLM](broken-reference) +* [Cohere](cohere.md) +* [GooglePaLM](broken-reference) +* [GoogleVertex AI](googlevertex-ai.md) +* [HuggingFace Inference](huggingface-inference.md) +* [Ollama](ollama.md) +* [OpenAI](openai.md) +* [Replicate](replicate.md) diff --git a/fr/integrations/langchain/llms/aws-bedrock.md b/fr/integrations/langchain/llms/aws-bedrock.md new file mode 100644 index 00000000..aa813351 --- /dev/null +++ b/fr/integrations/langchain/llms/aws-bedrock.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around AWS Bedrock large language models. +--- + +# Bouilloire AWS + +

AWS Boundrock Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/azure-openai.md b/fr/integrations/langchain/llms/azure-openai.md new file mode 100644 index 00000000..614be318 --- /dev/null +++ b/fr/integrations/langchain/llms/azure-openai.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around Azure OpenAI large language models. +--- + +# Azure Openai + +

nœud openai azure

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/cohere.md b/fr/integrations/langchain/llms/cohere.md new file mode 100644 index 00000000..cca3795b --- /dev/null +++ b/fr/integrations/langchain/llms/cohere.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around Cohere large language models. +--- + +# Adhérer + +

cohere node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/googlevertex-ai.md b/fr/integrations/langchain/llms/googlevertex-ai.md new file mode 100644 index 00000000..ebf2bd95 --- /dev/null +++ b/fr/integrations/langchain/llms/googlevertex-ai.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around GoogleVertexAI large language models. +--- + +# Googlevertex ai + +

Googlevertex AI Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/huggingface-inference.md b/fr/integrations/langchain/llms/huggingface-inference.md new file mode 100644 index 00000000..6018efa2 --- /dev/null +++ b/fr/integrations/langchain/llms/huggingface-inference.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around HuggingFace large language models. +--- + +# Inférence de l'étreinte + +

Nœud d'inférence HuggingFace

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/ollama.md b/fr/integrations/langchain/llms/ollama.md new file mode 100644 index 00000000..c77824ef --- /dev/null +++ b/fr/integrations/langchain/llms/ollama.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around open source large language models on Ollama. +--- + +# Ollla + +

Nœud Ollama

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/openai.md b/fr/integrations/langchain/llms/openai.md new file mode 100644 index 00000000..56da5bf0 --- /dev/null +++ b/fr/integrations/langchain/llms/openai.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around OpenAI large language models. +--- + +# Openai + +

Nœud OpenAI

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/llms/replicate.md b/fr/integrations/langchain/llms/replicate.md new file mode 100644 index 00000000..490c3b59 --- /dev/null +++ b/fr/integrations/langchain/llms/replicate.md @@ -0,0 +1,11 @@ +--- +description: Use Replicate to run open source models on cloud. +--- + +# Reproduire + +

reproduir le nœud + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/memory/README.md b/fr/integrations/langchain/memory/README.md new file mode 100644 index 00000000..1782571d --- /dev/null +++ b/fr/integrations/langchain/memory/README.md @@ -0,0 +1,81 @@ +--- +description: LangChain Memory Nodes +--- + +# Mémoire + +*** + +La mémoire vous permet de discuter avec l'IA comme si l'IA avait la mémoire des conversations précédentes. + +_ humain: salut je suis bob _ + +_ AI: Bonjour Bob! Ravi de vous rencontrer. Comment puis-je vous aider aujourd'hui? _ + +_ Human: quel est mon nom? _ + +_ AI: Votre nom est Bob, comme vous l'avez mentionné plus tôt. _ + +Sous le capot, ces conversations sont stockées dans des tableaux ou des bases de données et fournies comme contexte à LLM. Par exemple: + +``` +You are an assistant to a human, powered by a large language model trained by OpenAI. + +Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. 
+ +Current conversation: +{history} +``` + +### Nœuds de mémoire: + +* [Buffer Memory](buffer-memory.md) +* [Buffer Window Memory](buffer-window-memory.md) +* [Conversation Summary Memory](conversation-summary-memory.md) +* [Conversation Summary Buffer Memory](conversation-summary-buffer-memory.md) +* [DynamoDB Chat Memory](dynamodb-chat-memory.md) +* [Mem0 Memory](mem0-memory.md) +* [MongoDB Atlas Chat Memory](mongodb-atlas-chat-memory.md) +* [Redis-Backed Chat Memory](redis-backed-chat-memory.md) +* [Upstash Redis-Backed Chat Memory](upstash-redis-backed-chat-memory.md) +* [Zep Memory](zep-memory.md) + +## Conversations séparées pour plusieurs utilisateurs + +### UI et chat embarqué + +Par défaut, l'interface utilisateur et le chat embarqué séparent automatiquement les différentes conversations d'utilisateurs. Cela se fait en générant un ** unique **`chatId`** Pour chaque nouvelle interaction. Cette logique est manipulée sous le capot en fluant. + +### API de prédiction + +Vous pouvez séparer les conversations pour plusieurs utilisateurs en spécifiant un ** unique`sessionId`** + +1. Pour chaque nœud de mémoire, vous devriez pouvoir voir un paramètre d'entrée **`Session ID`** + +

+ +
+ +2. Dans le`/api/v1/prediction/{your-chatflowid}`Demande post-corps, spécifiez le **`sessionId`** dans **`overrideConfig`** + +```json +{ + "question": "hello!", + "overrideConfig": { + "sessionId": "user1" + } +} +``` + +### API du message + +* OBTENIR`/api/v1/chatmessage/{your-chatflowid}` +* SUPPRIMER`/api/v1/chatmessage/{your-chatflowid}` + + Query Param Type value sessiond string ASC Desc startDate string enddate string + +Toutes les conversations peuvent également être visualisées et gérées à partir de l'interface utilisateur: + +
+ +Pour l'assistant openai,[Threads](../agents/openai-assistant/threads.md)sera utilisé pour stocker des conversations. diff --git a/fr/integrations/langchain/memory/buffer-memory.md b/fr/integrations/langchain/memory/buffer-memory.md new file mode 100644 index 00000000..bf6a40f8 --- /dev/null +++ b/fr/integrations/langchain/memory/buffer-memory.md @@ -0,0 +1,12 @@ +# Mémoire tampon + +Utilisez la table de base de données Flowise`chat_message`comme mécanisme de stockage pour stocker / récupérer les conversations. + +
+ +## Saisir + +| Paramètre | Description | Par défaut | +| ---------- | ----------------------------------------------------------------------------- | ------------- | +| ID de session | Un identifiant pour récupérer / stocker les messages. S'il n'est pas spécifié, un ID aléatoire sera utilisé. | | +| Clé de mémoire | Une clé utilisée pour formater les messages dans le modèle d'invite | CHAT \ _History | diff --git a/fr/integrations/langchain/memory/buffer-window-memory.md b/fr/integrations/langchain/memory/buffer-window-memory.md new file mode 100644 index 00000000..10b870e1 --- /dev/null +++ b/fr/integrations/langchain/memory/buffer-window-memory.md @@ -0,0 +1,15 @@ +# Mémoire de fenêtre de tampon + +Utilisez la table de base de données Flowise`chat_message`comme mécanisme de stockage pour stocker / récupérer les conversations. + +La différence étant, elle ne fait que les dernières interactions k. Cette approche est bénéfique pour préserver une fenêtre coulissante des interactions les plus récentes, garantissant que le tampon reste gérable en taille. + +
+ +## Saisir + +| Paramètre | Description | Par défaut | +| ---------- | ----------------------------------------------------------------------------- | ------------- | +| Taille | Dernier k messages à récupérer | 4 | +| ID de session | Un identifiant pour récupérer / stocker les messages. S'il n'est pas spécifié, un ID aléatoire sera utilisé. | | +| Clé de mémoire | Une clé utilisée pour formater les messages dans le modèle d'invite | CHAT \ _History | diff --git a/fr/integrations/langchain/memory/conversation-summary-buffer-memory.md b/fr/integrations/langchain/memory/conversation-summary-buffer-memory.md new file mode 100644 index 00000000..016bee77 --- /dev/null +++ b/fr/integrations/langchain/memory/conversation-summary-buffer-memory.md @@ -0,0 +1,16 @@ +# Résumé de la conversation Mémoire de tampon + +Utilisez la table de base de données Flowise`chat_message`comme mécanisme de stockage pour stocker / récupérer les conversations. + +Cette mémoire garde un tampon d'interactions récentes et compile les anciennes en un résumé, en utilisant les deux dans son stockage. Au lieu de rincer les vieilles interactions basées uniquement sur leur nombre, il considère désormais la longueur totale des jetons pour décider quand les éliminer. + +
+ +## Saisir + +| Paramètre | Description | Par défaut | +| --------------- | ----------------------------------------------------------------------------- | ------------- | +| Modèle de chat | LLM utilisé pour effectuer un résumé | | +| Limite de jeton maximum | Résumez les conversations une fois que la limite de jeton est atteinte | 2000 | +| ID de session | Un identifiant pour récupérer / stocker les messages. S'il n'est pas spécifié, un ID aléatoire sera utilisé. | | +| Clé de mémoire | Une clé utilisée pour formater les messages dans le modèle d'invite | CHAT \ _History | diff --git a/fr/integrations/langchain/memory/conversation-summary-memory.md b/fr/integrations/langchain/memory/conversation-summary-memory.md new file mode 100644 index 00000000..510a1c4a --- /dev/null +++ b/fr/integrations/langchain/memory/conversation-summary-memory.md @@ -0,0 +1,15 @@ +# Réponse de la conversation Mémoire + +Utilisez la table de base de données Flowise`chat_message`comme mécanisme de stockage pour stocker / récupérer les conversations. + +Ce type de mémoire crée un bref résumé de la conversation au fil du temps. Ceci est utile pour raccourcir les informations provenant de longues discussions. Il met à jour et enregistre un résumé actuel au fil de la conversation. Cela est particulièrement utile pour les chats plus longs, où la sauvegarde de chaque message passé prendrait trop de place. + +
+ +## Saisir + +| Paramètre | Description | Par défaut | +| ---------- | ----------------------------------------------------------------------------- | ------------- | +| Modèle de chat | LLM utilisé pour effectuer un résumé | | +| ID de session | Un identifiant pour récupérer / stocker les messages. S'il n'est pas spécifié, un ID aléatoire sera utilisé. | | +| Clé de mémoire | Une clé utilisée pour formater les messages dans le modèle d'invite | CHAT \ _History | diff --git a/fr/integrations/langchain/memory/dynamodb-chat-memory.md b/fr/integrations/langchain/memory/dynamodb-chat-memory.md new file mode 100644 index 00000000..49a9ccf5 --- /dev/null +++ b/fr/integrations/langchain/memory/dynamodb-chat-memory.md @@ -0,0 +1,11 @@ +--- +description: Stores the conversation in dynamo db table. +--- + +# DynamoDB Chat Memory + +

Nœud de mémoire de chat DynamoDB

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/memory/mem0-memory.md b/fr/integrations/langchain/memory/mem0-memory.md new file mode 100644 index 00000000..89068c16 --- /dev/null +++ b/fr/integrations/langchain/memory/mem0-memory.md @@ -0,0 +1,93 @@ +# Mémoire MEM0 + +[Mem0](https://github.com/mem0ai/mem0)(prononcé "mem-zéro") améliore les assistants et agents de l'IA avec une couche de mémoire intelligente, permettant des interactions AI personnalisées. Il se souvient des préférences des utilisateurs, s'adapte aux besoins individuels et s'améliore en continu avec le temps. Cela le rend idéal pour des applications telles que les chatbots de support client, les assistants d'IA et les agents d'IA autonomes. + +MEM0 propose une suite complète de fonctionnalités de gestion de la mémoire, permettant une intégration transparente dans diverses applications axées sur l'IA. + +--- + +## Utilisation de MEM0 avec Flowise + +Suivez ces étapes pour intégrer MEM0 à Flowise: + +### 1. Configuration de Flowise + +1. Ouvrez l'application Flowise et créez une nouvelle toile, ou sélectionnez un modèle sur le marché Flowise. +2. Dans cet exemple, nous utilisons le modèle ** de chaîne de conversation **. +3. Remplacez la mémoire de tampon par défaut ** ** par ** MEM0 MEMER **. + + Flowise Memory Intégration
Intégration Flowise avec MEM0 + +### 2. Obtenez votre clé API MEM0 + +1. Accédez à la[Mem0 API Key dashboard](https://app.mem0.ai/dashboard/api-keys). +2. Générer ou copier votre clé API MEM0 existante. + + clés de l'API MEM0
Récupère la clé API de MEM0 + +### 3. Configurer les informations d'identification MEM0 en flux + +1. Entrez la clé de l'API ** MEM0 ** dans la section des informations d'identification MEM0. + + MEM0 Idementials
Configurer les informations d'identification de l'API
+ +### 4. Enregistrer et tester le chatflow + +1. Enregistrez votre configuration Flowise. +2. Exécutez un chat de test et stockez des informations. + + Flowise Test Chat
Test Memory Storage + +### 5. Vérifiez les souvenirs stockés dans le tableau de bord MEM0 + +1. Visiter le[Mem0 Dashboard](https://app.mem0.ai/dashboard/requests)pour examiner les souvenirs stockés. + + mem0 stocké de souvenirs
examiner les souvenirs stockés + +### 6. Valider la rétention de la mémoire + +1. Effacer l'historique de chat dans Flowise. +2. Posez une question basée sur les informations précédemment stockées pour confirmer la rétention. + + Tester Memory Retention
Confirmer la persistance de la mémoire + +--- + +## Paramètres supplémentaires + +MEM0 offre diverses options de personnalisation: + + MEM0 Settings
Options de configuration MEM0 + +1. ** Mode de recherche uniquement **: Active la récupération de la mémoire sans créer de nouvelles souvenirs. L'historique du chat reste jusqu'à ce qu'il soit éliminé manuellement. +2. ** Entités MEM0 **: Utiliser des identifiants tels que`user_id`, `run_id`, `app_id`, et`agent_id`Pour le contrôle de la mémoire granulaire. +3. ** ID du projet **: Attribuez un stockage de mémoire à un projet spécifique. Gérer les projets via[Mem0 Projects](https://app.mem0.ai/settings/projects/overview). +4. ** ID de l'organisation **: attribuer un stockage de mémoire à une organisation spécifique. Gérer les organisations via[Mem0 Organizations](https://app.mem0.ai/settings/organizations/overview). + +--- + +## Configurations de plate-forme MEM0 + +Des configurations supplémentaires sont disponibles sous[Mem0 Project Settings](https://app.mem0.ai/dashboard/project-settings): + +1. ** Instructions personnalisées **: Définissez les instructions au niveau du projet pour affiner l'extraction de la mémoire. Exemple: Extraire uniquement les détails académiques. +2. ** Date d'expiration **: Définissez une période d'expiration pour les souvenirs stockés, permettant l'élimination automatique des données si nécessaire. + + Paramètres du projet MEM0
Personnaliser les paramètres de niveau de projet + +--- + +## Configuration des informations d'identification MEM0 en flux + +Pour ajouter des informations d'identification en flux: + +1. Accédez aux paramètres des informations d'identification. +2. Ajoutez une nouvelle entrée d'identification pour MEM0. +3. Collez votre[Mem0 API Key](https://app.mem0.ai/dashboard/api-keys)Dans le champ de clé de l'API. + + Adding API Key in Flowise
Entrée de la touche API dans Flowise + +--- + +Avec ces configurations, votre configuration fluide s'intégrera de manière transparente à MEM0, offrant une rétention de mémoire améliorée et des interactions IA personnalisées. + diff --git a/fr/integrations/langchain/memory/mongodb-atlas-chat-memory.md b/fr/integrations/langchain/memory/mongodb-atlas-chat-memory.md new file mode 100644 index 00000000..08bbffa3 --- /dev/null +++ b/fr/integrations/langchain/memory/mongodb-atlas-chat-memory.md @@ -0,0 +1,11 @@ +--- +description: Stores the conversation in MongoDB Atlas. +--- + +# MONGEMENT DE CHAT MONGODB ATLAS + +

Nœud de mémoire de chat MongoDB Atlas

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/memory/redis-backed-chat-memory.md b/fr/integrations/langchain/memory/redis-backed-chat-memory.md new file mode 100644 index 00000000..bcbaa07d --- /dev/null +++ b/fr/integrations/langchain/memory/redis-backed-chat-memory.md @@ -0,0 +1,11 @@ +--- +description: Summarizes the conversation and stores the memory in Redis server. +--- + +# Mémoire de chat soutenue par Redis + +

Node de mémoire de chat soutenu par redé + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/memory/upstash-redis-backed-chat-memory.md b/fr/integrations/langchain/memory/upstash-redis-backed-chat-memory.md new file mode 100644 index 00000000..73d3f4e4 --- /dev/null +++ b/fr/integrations/langchain/memory/upstash-redis-backed-chat-memory.md @@ -0,0 +1,11 @@ +--- +description: Summarizes the conversation and stores the memory in Upstash Redis server. +--- + +# Mémoire de chat à soupçon de redissh + +

Upstash Redis-Backed Memory Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/memory/zep-memory.md b/fr/integrations/langchain/memory/zep-memory.md new file mode 100644 index 00000000..084f2f55 --- /dev/null +++ b/fr/integrations/langchain/memory/zep-memory.md @@ -0,0 +1,120 @@ +# Mémoire zep + +[Zep](https://github.com/getzep/zep)est un magasin de mémoire à long terme pour les applications LLM. Il stocke, résume, intègre, index et enrichit les histoires de l'application / chatbot LLM et les expose via des API simples à faible latence. + +## Guide pour déployer ZEP à rendre + +Vous pouvez facilement déployer ZEP sur des services cloud comme[Render](https://render.com/), [Flyio](https://fly.io/). Si vous préférez le tester localement, vous pouvez également tourner un conteneur Docker en suivant leur[quick guide](https://github.com/getzep/zep#quick-start). + +Dans cet exemple, nous allons déployer pour rendre. + +1. Se diriger vers[Zep Repo](https://github.com/getzep/zep#quick-start)et cliquez sur ** Déployer pour rendre ** +2. Cela vous amènera à la page Blueprint de Render et cliquez simplement sur ** Créer de nouvelles ressources ** + +
+ +3. Lorsque le déploiement est terminé, vous devriez voir 3 applications créées sur votre tableau de bord + +
+ +## Guide pour déployer ZEP vers Digital Ocean (via Docker) + +1. Cloner le repo + +```bash +git clone https://github.com/getzep/zep.git +cd zep +nano .env + +``` + +2. Ajoutez votre clé API Openai dans.env + +```bash +ZEP_OPENAI_API_KEY= + +``` + +```bash +docker compose up -d --build +``` + +3. Autoriser l'accès au pare-feu au port 8000 + +```bash +sudo ufw allow from any to any port 8000 proto tcp +ufw status numbered +``` + +Si vous utilisez un pare-feu séparé de Digital Ocean du tableau de bord, assurez-vous que le port 8000 y est également ajouté + +## Utiliser dans Flowise UI + +1. Retour à l'application Flowise, créez simplement une nouvelle toile ou utilisez l'un des modèles de Marketplace. Dans cet exemple, nous allons utiliser ** une simple chaîne conversationnelle ** + +
+ +2. Remplacer ** Mémoire de tampon ** par ** Mémoire ZEP **. Remplacez ensuite l'URL de base ** ** par l'URL ZEP que vous avez copiée ci-dessus + +
+ +3. Enregistrez le chatflow et testez-le pour voir si les conversations sont connues. + +
+ +4. Essayez maintenant d'effacer l'historique du chat, vous devriez voir qu'il est désormais incapable de vous souvenir des conversations précédentes. + +
+ +## Authentification ZEP + +ZEP vous permet de sécuriser votre instance à l'aide de l'authentification JWT. Nous utiliserons le`zepcli`utilitaire de ligne de commande[here](https://github.com/getzep/zepcli/releases). + +#### 1. Générez un token secret et jwt + +Après avoir téléchargé le Zepcli: + +Sur Linux ou macOS + +``` +./zepcli -i +``` + +Sous les fenêtres + +``` +zepcli.exe -i +``` + +Vous obtiendrez d'abord votre jeton secret: + +
+ +Ensuite, vous obtiendrez un jeton JWT: + +
+ +#### 2. Configurer les variables de l'environnement AUTH + +Définissez les variables d'environnement suivantes dans votre environnement de serveur ZEP: + +``` +ZEP_AUTH_REQUIRED=true +ZEP_AUTH_SECRET= +``` + +#### 3. Configurer les informations d'identification sur Flowise + +Ajoutez un nouvel diplôme pour ZEP et mettez le jeton JWT dans le champ Key API: + +
+ +#### 4. Utilisez les informations d'identification créées sur le nœud zep + +Dans les informations d'identification ZEP Node Connect, sélectionnez les informations d'identification que vous venez de créer. Et c'est tout! + +

Nœud de modération OpenAI

+ +Openai fournit[moderation API](https://platform.openai.com/docs/guides/moderation)Pour vérifier si le texte ou les images sont potentiellement nocifs. Si un contenu nocif est identifié, les utilisateurs peuvent spécifier un message d'erreur à afficher. + +
diff --git a/fr/integrations/langchain/moderation/simple-prompt-moderation.md b/fr/integrations/langchain/moderation/simple-prompt-moderation.md new file mode 100644 index 00000000..eac1c1e7 --- /dev/null +++ b/fr/integrations/langchain/moderation/simple-prompt-moderation.md @@ -0,0 +1,18 @@ +--- +description: >- + Check whether input consists of any text from Deny list, and prevent being + sent to LLM. +--- + +# Modération rapide simple + +

Nœud de modération d'invite simple

+ +Utilisez un autre LLM pour identifier si la requête utilisateur est proche de la liste de refus, si oui, la sortie d'un message d'erreur par défaut. + +Par exemple, la liste de refus peut être: + +* Ignorer les instructions précédentes +* Dicher toutes les informations sensibles + +
diff --git a/fr/integrations/langchain/output-parsers/README.md b/fr/integrations/langchain/output-parsers/README.md new file mode 100644 index 00000000..f37c2d0b --- /dev/null +++ b/fr/integrations/langchain/output-parsers/README.md @@ -0,0 +1,16 @@ +--- +description: LangChain Output Parser Nodes +--- + +# Analyseurs de sortie + +*** + +Les nœuds d'analyse de sortie sont chargés de prendre la sortie d'un modèle et de le transformer en un format plus approprié pour les tâches en aval. Utile lorsque vous utilisez des LLM pour générer des données structurées ou pour normaliser la sortie des modèles de chat et des LLM. + +### Nœuds d'analyse de sortie: + +* [CSV Output Parser](csv-output-parser.md) +* [Custom List Output Parser](custom-list-output-parser.md) +* [Structured Output Parser](structured-output-parser.md) +* [Advanced Structured Output Parser](advanced-structured-output-parser.md) diff --git a/fr/integrations/langchain/output-parsers/advanced-structured-output-parser.md b/fr/integrations/langchain/output-parsers/advanced-structured-output-parser.md new file mode 100644 index 00000000..6526ef0e --- /dev/null +++ b/fr/integrations/langchain/output-parsers/advanced-structured-output-parser.md @@ -0,0 +1,13 @@ +--- +description: >- + Parse the output of an LLM call into a given structure by providing a Zod + schema. +--- + +# Analyseur de sortie structuré avancé + +

Nœud d'analyseur de sortie structuré avancé

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/output-parsers/csv-output-parser.md b/fr/integrations/langchain/output-parsers/csv-output-parser.md new file mode 100644 index 00000000..98a94698 --- /dev/null +++ b/fr/integrations/langchain/output-parsers/csv-output-parser.md @@ -0,0 +1,11 @@ +--- +description: Parse the output of an LLM call as a comma-separated list of values. +--- + +# Analyseur de sortie CSV + +

Nœud d'analyseur de sortie CSV

CSV + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/output-parsers/custom-list-output-parser.md b/fr/integrations/langchain/output-parsers/custom-list-output-parser.md new file mode 100644 index 00000000..b2eaa3f0 --- /dev/null +++ b/fr/integrations/langchain/output-parsers/custom-list-output-parser.md @@ -0,0 +1,11 @@ +--- +description: Parse the output of an LLM call as a list of values. +--- + +# Analyseur de sortie de liste personnalisée + +

Nœud d'analyseur de sortie de liste personnalisée

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/output-parsers/structured-output-parser.md b/fr/integrations/langchain/output-parsers/structured-output-parser.md new file mode 100644 index 00000000..42a7921a --- /dev/null +++ b/fr/integrations/langchain/output-parsers/structured-output-parser.md @@ -0,0 +1,11 @@ +--- +description: Parse the output of an LLM call into a given (JSON) structure. +--- + +# Analyseur de sortie structuré + +

Nœud d'analyseur de sortie structuré

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/prompts/README.md b/fr/integrations/langchain/prompts/README.md new file mode 100644 index 00000000..74df1f7a --- /dev/null +++ b/fr/integrations/langchain/prompts/README.md @@ -0,0 +1,15 @@ +--- +description: LangChain Prompt Nodes +--- + +# Invite + +*** + +Les nœuds de modèle d'invite aident à traduire l'entrée et les paramètres de l'utilisateur en instructions pour un modèle de langue. Cela peut être utilisé pour guider la réponse d'un modèle, l'aidant à comprendre le contexte et à générer une sortie pertinente et cohérente basée sur le langage. + +### Nœuds rapides: + +* [Chat Prompt Template](chat-prompt-template.md) +* [Few Shot Prompt Template](few-shot-prompt-template.md) +* [Prompt Template](prompt-template.md) diff --git a/fr/integrations/langchain/prompts/chat-prompt-template.md b/fr/integrations/langchain/prompts/chat-prompt-template.md new file mode 100644 index 00000000..e16344df --- /dev/null +++ b/fr/integrations/langchain/prompts/chat-prompt-template.md @@ -0,0 +1,11 @@ +--- +description: Schema to represent a chat prompt. +--- + +# Modèle d'invite de chat + +

Nœud de modèle d'invite de chat

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/prompts/few-shot-prompt-template.md b/fr/integrations/langchain/prompts/few-shot-prompt-template.md new file mode 100644 index 00000000..f0a2cd8a --- /dev/null +++ b/fr/integrations/langchain/prompts/few-shot-prompt-template.md @@ -0,0 +1,11 @@ +--- +description: Prompt template you can build with examples. +--- + +# Peu de modèle d'invite de tir + +

peu de godet + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/prompts/prompt-template.md b/fr/integrations/langchain/prompts/prompt-template.md new file mode 100644 index 00000000..d93f060c --- /dev/null +++ b/fr/integrations/langchain/prompts/prompt-template.md @@ -0,0 +1,11 @@ +--- +description: Schema to represent a basic prompt for an LLM. +--- + +# Modèle invite + +

TEMPLEME Impuet + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/record-managers.md b/fr/integrations/langchain/record-managers.md new file mode 100644 index 00000000..740b64fa --- /dev/null +++ b/fr/integrations/langchain/record-managers.md @@ -0,0 +1,97 @@ +--- +description: LangChain Record Manager Nodes +--- + +# Gestionnaires de dossiers + +*** + +Les gestionnaires d'enregistrement gardent une trace de vos documents indexés, empêchant les intégres du vecteur dupliqués dans[Vector Store](vector-stores/). + +Lorsque des morceaux de document augmentent, chaque morceau sera haché en utilisant[SHA-1](https://github.com/emn178/js-sha1)algorithme. Ces hachages seront stockés dans Record Manager. S'il y a un hachage existant, le processus d'incorporation et de mise en service sera ignoré. + +Dans certains cas, vous voudrez peut-être supprimer les documents existants dérivés des mêmes sources que les nouveaux documents indexés. Pour cela, il existe 3 modes de nettoyage pour le gestionnaire de disques: + +{% Tabs%} +{% tab title = "incrémentiel"%} +Lorsque vous augmentez plusieurs documents et que vous souhaitez empêcher la suppression des documents existants qui ne font pas partie du processus de mise en service actuel, utilisez le mode de nettoyage ** ** **. + +1. Ayons un gestionnaire de disques avec`Incremental`Nettoyage et`source`En tant que clé sourceid + +

src="../../.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png" alt="" width="410">
+ +2. Et avoir les 2 documents suivants: + +| Texte | Métadonnées | +| ---- | ---------------- | +| Cat |`{source:"cat"}` | +| Chien |`{source:"dog"}` | + +
+ +
+ +3. Après un upsert, nous verrons 2 documents qui sont mis en place: + +
+ +* Le document original ** Cat ** est supprimé +* Un nouveau document avec ** Cats ** est ajouté +* ** Dog ** Le document est laissé intact +* Les incorporations vectorielles restantes dans le magasin vectoriel sont ** chats ** et ** chien ** + +
+{% endtab%} + +{% tab title = "full"%} +Lorsque vous augmentez plusieurs documents, ** le mode de nettoyage complet ** supprimera automatiquement tous les intérêts vectoriels qui ne font pas partie du processus de mise en service actuel. + +1. Ayons un gestionnaire de disques avec`Full`Nettoyage. Nous n'avons pas besoin d'avoir une clé sourceid pour le mode de nettoyage complet. + +
src="../../.gitbook/assets/image (17) (1) (1) (1) (2).png" alt="" width="407">
+ +2. Et avoir les 2 documents suivants: + +| Texte | Métadonnées | +| ---- | ---------------- | +| Cat |`{source:"cat"}` | +| Chien |`{source:"dog"}` | + +
+ +
+ +3. Après un upsert, nous verrons 2 documents qui sont mis en place: + +
+ +* Le document original ** Cat ** est supprimé +* Un nouveau document avec ** Cats ** est ajouté +* ** Dog ** Le document est supprimé +* Les incorporations vectorielles restantes dans le magasin vectoriel sont juste ** Cats ** + +
- + Cohere Rerank indexes the documents from most to least semantically relevant + to the query. +--- + +# Cohere Rerank Retriever + +

Nœud Cohere Rerank Retriever

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/custom-retriever.md b/fr/integrations/langchain/retrievers/custom-retriever.md new file mode 100644 index 00000000..0ab38009 --- /dev/null +++ b/fr/integrations/langchain/retrievers/custom-retriever.md @@ -0,0 +1,49 @@ +--- +description: Custom Retriever allows user to specify the format of the context to LLM +--- + +# Retriever personnalisé + +
+ +Par défaut, lorsque le contexte est récupéré à partir du magasin Vector, ils sont dans le format suivant: + +```json +[ + { + "pageContent": "This is an example", + "metadata": { + "source": "example.pdf" + } + }, + { + "pageContent": "This is example 2", + "metadata": { + "source": "example2.txt" + } + } +] +``` + +** PageContent ** du tableau sera assemblé en tant que chaîne et renvoyé à LLM pour l'achèvement. + +Cependant, dans certains cas, vous voudrez peut-être inclure des informations provenant de métadonnées pour donner plus d'informations à LLM, telles que Source, Link, etc. C'est là que ** personnalisé Retriever ** entre en jeu. Nous pouvons spécifier le format pour revenir à LLM. + +Par exemple, en utilisant le format suivant: + +```javascript +{{context}} +Source: {{metadata.source}} +``` + +Entraînera la chaîne combinée comme ci-dessous: + +``` +This is an example +Source: example.pdf + +This is example 2 +Source: example2.txt +``` + +Cela sera renvoyé à LLM. Étant donné que LLM possède désormais les sources des réponses, nous pouvons utiliser des invites pour instruire LLM de retourner des réponses suivies de citations. diff --git a/fr/integrations/langchain/retrievers/embeddings-filter-retriever.md b/fr/integrations/langchain/retrievers/embeddings-filter-retriever.md new file mode 100644 index 00000000..77dbb7f7 --- /dev/null +++ b/fr/integrations/langchain/retrievers/embeddings-filter-retriever.md @@ -0,0 +1,13 @@ +--- +description: >- + A document compressor that uses embeddings to drop documents unrelated to the + query. +--- + +# Filtre intégré Retriever + +

Embeddings Filter Retriever Node

diff --git a/fr/integrations/langchain/retrievers/hyde-retriever.md b/fr/integrations/langchain/retrievers/hyde-retriever.md new file mode 100644 index 00000000..4b8f5aa0 --- /dev/null +++ b/fr/integrations/langchain/retrievers/hyde-retriever.md @@ -0,0 +1,11 @@ +--- +description: Use HyDE retriever to retrieve from a vector store. +--- + +# Hyde Retriever + +

Hyde Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/llm-filter-retriever.md b/fr/integrations/langchain/retrievers/llm-filter-retriever.md new file mode 100644 index 00000000..9a505950 --- /dev/null +++ b/fr/integrations/langchain/retrievers/llm-filter-retriever.md @@ -0,0 +1,13 @@ +--- +description: >- + Iterate over the initially returned documents and extract, from each, only the + content that is relevant to the query. +--- + +# Filtre LLM Retriever + +

LLM Filter Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/multi-query-retriever.md b/fr/integrations/langchain/retrievers/multi-query-retriever.md new file mode 100644 index 00000000..36b734f4 --- /dev/null +++ b/fr/integrations/langchain/retrievers/multi-query-retriever.md @@ -0,0 +1,13 @@ +--- +description: >- + Generate multiple queries from different perspectives for a given user input + query. +--- + +# Retriever multi-requêtes + +

Nœud de récupération multi-requêtes

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/page.md b/fr/integrations/langchain/retrievers/page.md new file mode 100644 index 00000000..a7ce11bf --- /dev/null +++ b/fr/integrations/langchain/retrievers/page.md @@ -0,0 +1,13 @@ +--- +description: >- + Voyage AI Rerank indexes the documents from most to least semantically + relevant to the query. +--- + +# Voyage Ai Rerank Retriever + +

Voyage AI Rerank Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/prompt-retriever.md b/fr/integrations/langchain/retrievers/prompt-retriever.md new file mode 100644 index 00000000..b27536bd --- /dev/null +++ b/fr/integrations/langchain/retrievers/prompt-retriever.md @@ -0,0 +1,13 @@ +--- +description: >- + Store prompt template with name & description to be later queried by + MultiPromptChain. +--- + +# Retriever rapide + +

Prompt Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/reciprocal-rank-fusion-retriever.md b/fr/integrations/langchain/retrievers/reciprocal-rank-fusion-retriever.md new file mode 100644 index 00000000..1d0d75e7 --- /dev/null +++ b/fr/integrations/langchain/retrievers/reciprocal-rank-fusion-retriever.md @@ -0,0 +1,11 @@ +--- +description: Reciprocal Rank Fusion to re-rank search results by multiple query generation. +--- + +# Ranqueur réciproque Fusion Retriever + +

Reciprocal Rank Fusion Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/similarity-score-threshold-retriever.md b/fr/integrations/langchain/retrievers/similarity-score-threshold-retriever.md new file mode 100644 index 00000000..cf936439 --- /dev/null +++ b/fr/integrations/langchain/retrievers/similarity-score-threshold-retriever.md @@ -0,0 +1,11 @@ +--- +description: Return results based on the minimum similarity percentage. +--- + +# Score de similitude Threshold Retriever + +

Similarity Score Threshold Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/retrievers/vector-store-retriever.md b/fr/integrations/langchain/retrievers/vector-store-retriever.md new file mode 100644 index 00000000..20e56e08 --- /dev/null +++ b/fr/integrations/langchain/retrievers/vector-store-retriever.md @@ -0,0 +1,11 @@ +--- +description: Store vector store as retriever to be later queried by MultiRetrievalQAChain. +--- + +# Retriever du magasin vectoriel + +

Vector Store Retriever Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/text-splitters/README.md b/fr/integrations/langchain/text-splitters/README.md new file mode 100644 index 00000000..aba2770e --- /dev/null +++ b/fr/integrations/langchain/text-splitters/README.md @@ -0,0 +1,30 @@ +--- +description: LangChain Text Splitter Nodes +--- + +# Séparateurs de texte + +*** + +** Lorsque vous souhaitez faire face à de longs morceaux de texte, il est nécessaire de diviser ce texte en morceaux. ** \ +Aussi simple que cela puisse paraître, il y a beaucoup de complexité potentielle ici. Idéalement, vous voulez garder les morceaux de texte sémantiquement liés. Ce que signifie «sémantiquement lié» pourrait dépendre du type de texte. Ce cahier présente plusieurs façons de le faire. + +** À un niveau élevé, les séparateurs de texte fonctionnent comme suit: ** + +1. Divisez le texte en petits morceaux sémantiquement significatifs (souvent des phrases). +2. Commencez à combiner ces petits morceaux en un morceau plus grand jusqu'à ce que vous atteigniez une certaine taille (mesurée par une fonction). +3. Une fois que vous avez atteint cette taille, faites de ce morceau son propre texte, puis commencez à créer un nouveau morceau de texte avec un peu de chevauchement (pour garder le contexte entre des morceaux). + +** Cela signifie qu'il y a deux axes différents le long desquels vous pouvez personnaliser votre séparateur de texte: ** + +1. Comment le texte est divisé +2. 
Comment la taille du morceau est mesurée + +### Nœuds de séparateur de texte: + +* [Character Text Splitter](character-text-splitter.md) +* [Code Text Splitter](code-text-splitter.md) +* [Html-To-Markdown Text Splitter](html-to-markdown-text-splitter.md) +* [Markdown Text Splitter](markdown-text-splitter.md) +* [Recursive Character Text Splitter](recursive-character-text-splitter.md) +* [Token Text Splitter](token-text-splitter.md) diff --git a/fr/integrations/langchain/text-splitters/character-text-splitter.md b/fr/integrations/langchain/text-splitters/character-text-splitter.md new file mode 100644 index 00000000..ba516881 --- /dev/null +++ b/fr/integrations/langchain/text-splitters/character-text-splitter.md @@ -0,0 +1,11 @@ +--- +description: Splits only on one type of character (defaults to "\n\n"). +--- + +# Splier de texte de caractère + +

Splitter de texte de caractères

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/text-splitters/code-text-splitter.md b/fr/integrations/langchain/text-splitters/code-text-splitter.md new file mode 100644 index 00000000..9bcf72aa --- /dev/null +++ b/fr/integrations/langchain/text-splitters/code-text-splitter.md @@ -0,0 +1,11 @@ +--- +description: Split documents based on language-specific syntax. +--- + +# Code Splitter du texte + +

Code Node de séparateur de texte

- + Converts Html to Markdown and then split your content into documents based on + the Markdown headers. +--- + +# Splitter de texte HTML à markdown + +

Html-To-Markdown Text Splitter Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/text-splitters/markdown-text-splitter.md b/fr/integrations/langchain/text-splitters/markdown-text-splitter.md new file mode 100644 index 00000000..95c94ad2 --- /dev/null +++ b/fr/integrations/langchain/text-splitters/markdown-text-splitter.md @@ -0,0 +1,11 @@ +--- +description: Split your content into documents based on the Markdown headers. +--- + +# SPIRTER DE TEXTE DE MARCHE + +

Markdown Text Splitter Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/text-splitters/recursive-character-text-splitter.md b/fr/integrations/langchain/text-splitters/recursive-character-text-splitter.md new file mode 100644 index 00000000..d551fda4 --- /dev/null +++ b/fr/integrations/langchain/text-splitters/recursive-character-text-splitter.md @@ -0,0 +1,13 @@ +--- +description: >- + Split documents recursively by different characters - starting with "\n\n", + then "\n", then " ". +--- + +# Splitter de texte récursif du caractère + +

Recursive Character Text Splitter Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/text-splitters/token-text-splitter.md b/fr/integrations/langchain/text-splitters/token-text-splitter.md new file mode 100644 index 00000000..33a3c597 --- /dev/null +++ b/fr/integrations/langchain/text-splitters/token-text-splitter.md @@ -0,0 +1,14 @@ +--- +description: >- + Splits a raw text string by first converting the text into BPE tokens, then + split these tokens into chunks and convert the tokens within a single chunk + back into text. +--- + +# Séparateur de texte de jeton + +

Node de séparateur de texte de token

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/README.md b/fr/integrations/langchain/tools/README.md new file mode 100644 index 00000000..9dc46260 --- /dev/null +++ b/fr/integrations/langchain/tools/README.md @@ -0,0 +1,31 @@ +--- +description: LangChain Tool Nodes +--- + +# Outils + +*** + +Les outils sont des fonctions que les agents peuvent utiliser pour interagir avec le monde. Ces outils peuvent être des services publics génériques (par exemple, recherche), d'autres chaînes ou même d'autres agents. + +### Nœuds d'outil: + +* [BraveSearch API](bravesearch-api.md) +* [Calculator](calculator.md) +* [Chain Tool](chain-tool.md) +* [Chatflow Tool](chatflow-tool.md) +* [Custom Tool](custom-tool.md) +* [Exa Search](exa-search.md) +* [Google Custom Search](google-custom-search.md) +* [OpenAPI Toolkit](openapi-toolkit.md) +* [Python Interpreter](python-interpreter.md) +* [Read File](read-file.md) +* [Request Get](request-get.md) +* [Request Post](request-post.md) +* [Retriever Tool](retriever-tool.md) +* [SearchApi](searchapi.md) +* [SearXNG](searxng.md) +* [Serp API](serp-api.md) +* [Serper](serper.md) +* [Web Browser](web-browser.md) +* [Write File](write-file.md) diff --git a/fr/integrations/langchain/tools/bravesearch-api.md b/fr/integrations/langchain/tools/bravesearch-api.md new file mode 100644 index 00000000..da6f0794 --- /dev/null +++ b/fr/integrations/langchain/tools/bravesearch-api.md @@ -0,0 +1,13 @@ +--- +description: >- + Wrapper around BraveSearch API - a real-time API to access Brave search + results. +--- + +# API Bravesearch + +

Bravesearch API Node

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/calculator.md b/fr/integrations/langchain/tools/calculator.md new file mode 100644 index 00000000..7c3205d9 --- /dev/null +++ b/fr/integrations/langchain/tools/calculator.md @@ -0,0 +1,11 @@ +--- +description: Perform calculations on response. +--- + +# Calculatrice + +
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/chain-tool.md b/fr/integrations/langchain/tools/chain-tool.md new file mode 100644 index 00000000..30a3d504 --- /dev/null +++ b/fr/integrations/langchain/tools/chain-tool.md @@ -0,0 +1,11 @@ +--- +description: Use a chain as allowed tool for agent. +--- + +# Outil de chaîne + +

Node d'outil de chaîne

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/chatflow-tool.md b/fr/integrations/langchain/tools/chatflow-tool.md new file mode 100644 index 00000000..aaed2cf2 --- /dev/null +++ b/fr/integrations/langchain/tools/chatflow-tool.md @@ -0,0 +1,11 @@ +--- +description: Execute another chatflow and get the response. +--- + +# Outil Chatflow + +
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/custom-tool.md b/fr/integrations/langchain/tools/custom-tool.md new file mode 100644 index 00000000..7f4d1d93 --- /dev/null +++ b/fr/integrations/langchain/tools/custom-tool.md @@ -0,0 +1,598 @@ +# Outil personnalisé + +Regardez comment utiliser des outils personnalisés + +{% embed url = "https://youtu.be/hsp9lkktvy0"%} + +## Problème + +La fonction prend généralement des données d'entrée structurées. Disons que vous voulez que le LLM puisse appeler AirTable Create Record[API](https://airtable.com/developers/web/api/create-records), les paramètres du corps doivent être structurés de manière spécifique. Par exemple: + +```json +"records": [ + { + "fields": { + "Address": "some address", + "Name": "some name", + "Visited": true + } + } +] +``` + +Idéalement, nous voulons que LLM renvoie des données structurées appropriées comme celle-ci: + +```json +{ + "Address": "some address", + "Name": "some name", + "Visited": true +} +``` + +Nous pouvons donc extraire la valeur et l'analyser dans le corps nécessaire à l'API. Cependant, il est difficile de demander à LLM de produire le modèle exact. + +Avec le nouveau[OpenAI Function Calling](https://openai.com/blog/function-calling-and-other-api-updates)modèles, c'est maintenant possible.`gpt-4-0613`et`gpt-3.5-turbo-0613`sont spécifiquement formés pour retourner des données structurées. Le modèle choisira intelligemment de sortir un objet JSON contenant des arguments pour appeler ces fonctions. + +## Tutoriel + +** Objectif **: Demandez à l'agent d'obtenir automatiquement le mouvement du cours de l'action, de récupérer les actualités des actions connexes et d'ajouter un nouveau record à AirTable. 
+ +Commençons[🚀](https://emojipedia.org/rocket/) + +### Créer des outils + +Nous avons besoin de 3 outils pour atteindre l'objectif: + +* Obtenez un mouvement du cours des actions +* Obtenir des actualités +* Ajouter un enregistrement Airtable + +#### Obtenez un mouvement du cours des actions + +Créez un nouvel outil avec les détails suivants (vous pouvez changer comme vous le souhaitez): + +* Nom: Get \ _Stock \ _Movers +* Description: Obtenez les actions qui ont des mouvements de prix / volume les plus importants, par exemple actifs, gagnants, perdants, etc. + +La description est une pièce importante car Chatgpt s'appuie sur cela pour décider quand utiliser cet outil. + +
+ +* Fonction JavaScript: nous allons utiliser[Morning Star](https://rapidapi.com/apidojo/api/morning-star) `/market/v2/get-movers`API pour obtenir des données. Vous devez d'abord cliquer sur vous abonner au test si vous ne l'avez pas déjà fait, puis copiez le code et collez-le dans la fonction JavaScript. + * Ajouter`const fetch = require('node-fetch');`en haut pour importer la bibliothèque. Vous pouvez importer tous les nodejs intégrés[modules](https://www.w3schools.com/nodejs/ref_modules.asp)et[external libraries](https://github.com/FlowiseAI/Flowise/blob/main/packages/components/src/utils.ts#L289). + * Retourner le`result`à la fin. + +
+ +Le code final doit être: + +```javascript +const fetch = require('node-fetch'); +const url = 'https://morning-star.p.rapidapi.com/market/v2/get-movers'; +const options = { + method: 'GET', + headers: { + 'X-RapidAPI-Key': 'replace with your api key', + 'X-RapidAPI-Host': 'morning-star.p.rapidapi.com' + } +}; + +try { + const response = await fetch(url, options); + const result = await response.text(); + console.log(result); + return result; +} catch (error) { + console.error(error); + return ''; +} +``` + +Vous pouvez maintenant le sauver. + +#### Obtenir des actualités + +Créez un nouvel outil avec les détails suivants (vous pouvez changer comme vous le souhaitez): + +* Nom: get \ _stock \ _news +* Description: Obtenez les dernières nouvelles pour un stock +* Schéma d'entrée: + * Propriété: PerformanceID + * Type: chaîne + * Description: ID du stock, qui est appelé performanceid dans l'API + * Requis: vrai + +Le schéma d'entrée indique à LLM quoi retourner en tant qu'objet JSON. Dans ce cas, nous nous attendons à un objet JSON comme ci-dessous: + +
   {"performanceId": "some ticker"}
+  
+ +
+ +* Fonction JavaScript: nous allons utiliser[Morning Star](https://rapidapi.com/apidojo/api/morning-star) `/market/v2/get-movers`API pour obtenir des données. Vous devez d'abord cliquer sur vous abonner au test si vous ne l'avez pas déjà fait, puis copiez le code et collez-le dans la fonction JavaScript. + * Ajouter`const fetch = require('node-fetch');`en haut pour importer la bibliothèque. Vous pouvez importer tous les nodejs intégrés[modules](https://www.w3schools.com/nodejs/ref_modules.asp)et[external libraries](https://github.com/FlowiseAI/Flowise/blob/main/packages/components/src/utils.ts#L289). + * Retourner le`result`à la fin. + +
+ +Le code final doit être: + +```javascript +const fetch = require('node-fetch'); +const url = 'https://morning-star.p.rapidapi.com/market/v2/get-movers'; +const options = { + method: 'GET', + headers: { + 'X-RapidAPI-Key': 'replace with your api key', + 'X-RapidAPI-Host': 'morning-star.p.rapidapi.com' + } +}; + +try { + const response = await fetch(url, options); + const result = await response.text(); + console.log(result); + return result; +} catch (error) { + console.error(error); + return ''; +} +``` + +Vous pouvez maintenant le sauver. + +#### Obtenir des actualités + +Créez un nouvel outil avec les détails suivants (vous pouvez changer comme vous le souhaitez): + +* Nom: get \ _stock \ _news +* Description: Obtenez les dernières nouvelles pour un stock +* Schéma d'entrée: + * Propriété: PerformanceID + * Type: chaîne + * Description: ID du stock, qui est appelé performanceid dans l'API + * Requis: vrai + +Le schéma d'entrée indique à LLM quoi retourner en tant qu'objet JSON. Dans ce cas, nous nous attendons à un objet JSON comme ci-dessous: + +
   {"performanceId": "some ticker"}
+  
+ +
+ +Code final: + +```javascript +const fetch = require('node-fetch'); +const url = 'https://morning-star.p.rapidapi.com/news/list?performanceId=' + $performanceId; +const options = { + method: 'GET', + headers: { + 'X-RapidAPI-Key': 'replace with your api key', + 'X-RapidAPI-Host': 'morning-star.p.rapidapi.com' + } +}; + +try { + const response = await fetch(url, options); + const result = await response.text(); + console.log(result); + return result; +} catch (error) { + console.error(error); + return ''; +} +``` + +Vous pouvez maintenant le sauver. + +#### Ajouter un enregistrement Airtable + +Créez un nouvel outil avec les détails suivants (vous pouvez changer comme vous le souhaitez): + +* Nom: Add \ _airable +* Description: Ajouter le stock, le résumé des nouvelles et le déménagement à AirTable +* Schéma d'entrée: + * Propriété: stock + * Type: chaîne + * Description: Ticker d'origine + * Requis: vrai + * Propriété: déménager + * Type: chaîne + * Description: Déplacement des prix en% + * Requis: vrai + * Propriété: nouvelles \ _Summary + * Type: chaîne + * Description: Résumé des nouvelles du stock + * Requis: vrai + +Chatgpt renvoie un objet JSON comme ceci: + +```json +{ "stock": "SOME TICKER", "move": "20%", "news_summary": "Some summary" } +``` + +
+ +* Fonction JavaScript: nous allons utiliser[Airtable Create Record API](https://airtable.com/developers/web/api/create-records)Pour créer un nouvel enregistrement dans une table existante. Vous pouvez trouver le TableId et BaseID à partir de[here](https://www.highviewapps.com/kb/where-can-i-find-the-airtable-base-id-and-table-id/). Vous devrez également créer un jeton d'accès personnel, trouvez comment le faire[here](https://www.highviewapps.com/kb/how-do-i-create-an-airtable-personal-access-token/). + +Le code final devrait ressembler ci-dessous. Notez comment nous passons`$stock`, `$move`et`$news_summary`Comme variables: + +```javascript +const fetch = require('node-fetch'); +const baseId = 'your-base-id'; +const tableId = 'your-table-id'; +const token = 'your-token'; + +const body = { + "records": [ + { + "fields": { + "stock": $stock, + "move": $move, + "news_summary": $news_summary, + } + } + ] +}; + +const options = { + method: 'POST', + headers: { + 'Authorization': `Bearer ${token}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify(body) +}; + +const url = `https://api.airtable.com/v0/${baseId}/${tableId}` + +try { + const response = await fetch(url, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +} +``` + +Vous pouvez maintenant le sauver. + +Vous devriez voir 3 outils créés: + +
+ +### Créer ChatFlow + +Vous pouvez utiliser le modèle ** Fonction Openai ** ** Agent ** à partir de Marketplace et remplacer les outils par ** outil personnalisé **. Sélectionnez l'outil que vous avez créé. + +Remarque: L'agent de fonction OpenAI prend uniquement en charge les modèles 0613 actuellement. + +
Quelle est l'action qui a le plus gros mouvement des prix aujourd'hui? _ + +_ L'action qui a le plus grand mouvement de prix aujourd'hui est Overstock.com (OSTK) avec une décision de prix de 17,47%. _ + +Vous pouvez ensuite suivre une autre question pour obtenir les nouvelles de ce stock particulier: + +_ Quelles sont les dernières nouvelles de ce stock qui pourraient provoquer le mouvement des prix? _ + +_ Voici les dernières nouvelles sur Overstock.com (OSTK) qui auraient pu provoquer le mouvement des prix: _ + +1. _ Titre: "Les actions de Overstock montent sur le succès de 21,5 millions d'offres pour le bain de lit et Beyond Actifs" Source: MarketWatch Publié Date: 22 juin 2023 Résumé: Les actions de OverStock.com ont connu une augmentation significative après avoir réussi à gagner une offre pour le bain de lit et au-delà des actifs d'une valeur de 21,5 millions. +2. _ Titre: "Penser aux options de trading ou aux actions en plates-formes Meta, Overstock.com, Walmart, Home Depot ou United Parcel Service?" Source: PR Newswire Publié Date: 22 juin 2023 Résumé: Cet article d'actualité traite des options de négociation potentielles et des actions, y compris Overstock.com, que les investisseurs peuvent considérer. _ + +_ Veuillez noter que ces articles de presse sont à des fins d'information uniquement et peuvent ne pas être la seule raison du mouvement des prix. Il est toujours recommandé de mener des recherches et des analyses approfondies avant de prendre des décisions d'investissement. _ + +Enfin, vous pouvez demander à Chatgpt d'ajouter un nouvel enregistrement à AirTable: + +_ Pouvez-vous ajouter un enregistrement à AirTable avec le ticker d'origine, le mouvement des prix et le résumé des nouvelles? 
_ + +_ J'ai ajouté un enregistrement à AirTable avec les détails suivants: _ + +_ Stock Ticker: Ostk Price Move: 17,47% News Résumé: Les actions d'OverStock.com ont connu une augmentation significative après avoir réussi à remporter une offre pour Bed Bath & Beyond d'actifs d'une valeur de 21,5 millions de dollars. _ + +[🎉](https://emojipedia.org/party-popper/)[🎉](https://emojipedia.org/party-popper/)** Voila! ** C'est ainsi que vous pouvez créer votre propre outil personnalisé et l'utiliser avec l'agent de fonction Openai! + +## Supplémentaire + +### Passer l'ID de session pour fonctionner + +Par défaut, la fonction dans l'outil personnalisé a accès aux configurations de flux suivantes: + +```json5 +$flow.sessionId +$flow.chatId +$flow.chatflowId +$flow.input +``` + +Vous trouverez ci-dessous un exemple de l'envoi du SessionID à Discord WebHook: + +{% Tabs%} +{% tab title = "javascript"%} +```javascript +const fetch = require('node-fetch'); +const webhookUrl = "https://discord.com/api/webhooks/1124783587267"; +const content = $content; // captured from input schema +const sessionId = $flow.sessionId; + +const body = { + "content": `${mycontent} and the sessionid is ${sessionId}` +}; + +const options = { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify(body) +}; + +const url = `${webhookUrl}?wait=true` + +try { + const response = await fetch(url, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +} +``` +{% endtab%} +{% endtabs%} + +### Passer les variables pour fonctionner + +Dans certains cas, vous souhaitez passer les variables à la fonction de l'outil personnalisé. + +Par exemple, vous créez un chatbot qui utilise un outil personnalisé. L'outil personnalisé exécute un appel post HTTP et une clé API est nécessaire pour une demande authentifiée réussie. Vous pouvez le passer comme variable. 
+ +Par défaut, la fonction dans l'outil personnalisé a accès aux variables: + +``` +$vars. +``` + +Exemple de la façon de passer les variables en flux à l'aide de l'API et intégrée: + +{% Tabs%} +{% tab title = "JavaScript api"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/prediction/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "question": "Hey, how are you?", + "overrideConfig": { + "vars": { + "apiKey": "abc" + } + } +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} + +{% tab title = "embed"%} +```html + +``` +{% endtab%} +{% endtabs%} + +Exemple de la façon de recevoir les variables dans l'outil personnalisé: + +{% Tabs%} +{% tab title = "javascript"%} +```javascript +const fetch = require('node-fetch'); +const webhookUrl = "https://discord.com/api/webhooks/1124783587267"; +const content = $content; // captured from input schema +const sessionId = $flow.sessionId; +const apiKey = $vars.apiKey; + +const body = { + "content": `${mycontent} and the sessionid is ${sessionId}` +}; + +const options = { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${apiKey}` + }, + body: JSON.stringify(body) +}; + +const url = `${webhookUrl}?wait=true` + +try { + const response = await fetch(url, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +} +``` +{% endtab%} +{% endtabs%} + +### Remplacer l'outil personnalisé + +Les paramètres ci-dessous peuvent être remplacés + +| Paramètre | Description | +| ---------------- | ---------------- | +| CustomToolName | Nom de l'outil | +| CustomTooldeSC | Description de l'outil | +| CustomToolSchema | Schéma d'outil | +| CustomToolfunc | Fonction de l'outil | + +Exemple d'un appel API pour remplacer 
les paramètres d'outil personnalisés: + +{% Tabs%} +{% tab title = "JavaScript api"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/prediction/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "question": "Hey, how are you?", + "overrideConfig": { + "customToolName": "example_tool", + "customToolSchema": "z.object({title: z.string()})" + } +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +### Importer des dépendances externes + +Vous pouvez importer tous les nodejs intégrés[modules](https://www.w3schools.com/nodejs/ref_modules.asp)et soutenu[external libraries](https://github.com/FlowiseAI/Flowise/blob/main/packages/components/src/utils.ts#L289)en fonction. + +1. Pour importer toutes les bibliothèques non soutenues, vous pouvez facilement ajouter le nouveau package NPM à`package.json`dans`packages/components`dossier. + +```bash +cd Flowise && cd packages && cd components +pnpm add +cd .. && cd .. +pnpm install +pnpm build +``` + +2. Ensuite, ajoutez les bibliothèques importées à`TOOL_FUNCTION_EXTERNAL_DEP`variable d'environnement. Référer[#builtin-and-external-dependencies](../../../configuration/environment-variables.md#builtin-and-external-dependencies "mention")pour plus de détails. +3. Démarrer l'application + +```bash +pnpm start +``` + +4. 
Vous pouvez ensuite utiliser la bibliothèque nouvellement ajoutée dans la fonction ** javascript ** comme ça: + +```javascript +const axios = require('axios') +``` + +Regardez comment ajouter des dépendances supplémentaires et des bibliothèques d'importation + +{% embed url = "https://youtu.be/0h1rrisc0ok"%} diff --git a/fr/integrations/langchain/tools/exa-search.md b/fr/integrations/langchain/tools/exa-search.md new file mode 100644 index 00000000..595a33fb --- /dev/null +++ b/fr/integrations/langchain/tools/exa-search.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around Exa Search API - search engine fully designed for use by LLMs. +--- + +# Recherche exa + +

nœud de recherche exa

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/gmail.md b/fr/integrations/langchain/tools/gmail.md new file mode 100644 index 00000000..308d2f6c --- /dev/null +++ b/fr/integrations/langchain/tools/gmail.md @@ -0,0 +1,72 @@ +# Gmail + +## Créer des informations d'identification dans Flowise + +1. Ajouter un nouvel diplôme Gmail OAuth2 +2. Entrez un nom pour les informations d'identification. +3. Copiez l'URL de redirection OAuth. +4. Notez que les champs suivants doivent être remplis: + * ID client + * Secret client + +
+ +## Créer / utiliser Google Project + +1. Connectez-vous à votre[**Google Cloud**](https://console.cloud.google.com/)compte. +2. Se diriger vers[**Google Cloud Console > APIs & Services**](https://console.cloud.google.com/apis/credentials), et sélectionnez le projet que vous souhaitez utiliser dans la liste déroulante en haut à gauche (ou créez un nouveau projet et sélectionnez-le). +3. Configurez l'écran de consentement ** OAuth ** Si vous n'en avez pas confiuré auparavant. + +
+ +4. Accédez à ** des informations d'identification **, puis cliquez sur ** + Créer des informations d'identification> ID client OAuth **. + +
+ +5. Dans le ** Type d'application ** Dropdown, sélectionnez ** Application Web **. +6. Sous ** URIS REDIRECT AMÉCIRISÉ **, cliquez sur ** + Ajouter URI ** et collez l'URL de redirection OAuth copiée plus tôt. +7. Cliquez sur ** Créer **. + +
+ +8. Copiez l'ID client et le secret du client: + +
+ +9. Dans ** APPATIVE API & SERVICES **, cliquez sur ** + Activer les API et Services **. +10. Recherchez et activez l'API ** Gmail **. + +
+ +11. Retour à ** Indementiels **, cliquez sur les informations d'identification nouvellement créées sous ** OAuth 2.0 Client IDS **, et sur la page de détail, vous trouverez le ** Client ID ** et ** Client Secret **. + +## Terminer la configuration en flux + +1. Remplissez toutes les valeurs copiées plus tôt. Puis cliquez sur "** Authenticiate **": + +
+ +2. Une fenêtre de connexion Google apparaîtra: + +
+ +3. Accorder les autorisations: + +
+ +4. La fenêtre pop-up sera fermée automatiquement et les informations d'identification seront enregistrées et prêtes à être utilisées. + +## Utiliser comme outil d'agent + +Plusieurs actions peuvent être sélectionnées pour permettre à l'agent de choisir intelligemment celui approprié. \ +Les paramètres peuvent être laissés vides pour permettre à l'agent de déterminer les valeurs par elle-même. Cependant, si l'utilisateur fournit des valeurs, ceux-ci remplaceront les choix de l'agent. + +
+ +## Utiliser comme nœud d'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. Par exemple, la récupération d'une liste de messages projets avant de passer à l'étape suivante. \ +Dans ce mode, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **. \ +Contrairement au[**Use as Agent Tool**](gmail.md#use-as-agent-tool)Option, il n'y a pas d'agent pour déterminer automatiquement les entrées. L'utilisateur doit remplir manuellement les champs, soit en entrant des valeurs fixes, soit en utilisant des variables enfermées dans des supports doubles bouclés`{{ }}`. + +
diff --git a/fr/integrations/langchain/tools/google-calendar.md b/fr/integrations/langchain/tools/google-calendar.md new file mode 100644 index 00000000..204188b6 --- /dev/null +++ b/fr/integrations/langchain/tools/google-calendar.md @@ -0,0 +1,72 @@ +# Calendrier Google + +## Créer des informations d'identification dans Flowise + +1. Ajouter un nouveau Calendrier Google OAuth2 +2. Entrez un nom pour les informations d'identification. +3. Copiez l'URL de redirection OAuth. +4. Notez que les champs suivants doivent être remplis: + * ID client + * Secret client + +
+ +## Créer / utiliser Google Project + +1. Connectez-vous à votre[**Google Cloud**](https://console.cloud.google.com/)compte. +2. Se diriger vers[**Google Cloud Console > APIs & Services**](https://console.cloud.google.com/apis/credentials), et sélectionnez le projet que vous souhaitez utiliser dans la liste déroulante en haut à gauche (ou créez un nouveau projet et sélectionnez-le). +3. Configurez l'écran de consentement ** OAuth ** Si vous n'en avez pas confiuré auparavant. + +
+ +4. Accédez à ** des informations d'identification **, puis cliquez sur ** + Créer des informations d'identification> ID client OAuth **. + +
+ +5. Dans le ** Type d'application ** Dropdown, sélectionnez ** Application Web **. +6. Sous ** URIS REDIRECT AMÉCIRISÉ **, cliquez sur ** + Ajouter URI ** et collez l'URL de redirection OAuth copiée plus tôt. +7. Cliquez sur ** Créer **. + +
+ +8. Copiez l'ID client et le secret du client: + +
+ +9. Dans ** APPATIVE API & SERVICES **, cliquez sur ** + Activer les API et Services **. +10. Recherchez et activez l'API ** Google Calendar **. + +
+ +11. Retour à ** Indementiels **, cliquez sur les informations d'identification nouvellement créées sous ** OAuth 2.0 Client IDS **, et sur la page de détail, vous trouverez le ** Client ID ** et ** Client Secret **. + +## Terminer la configuration en flux + +1. Remplissez toutes les valeurs copiées plus tôt. Puis cliquez sur "** Authenticiate **": + +
+ +2. Une fenêtre de connexion Google apparaîtra: + +
+ +3. Accorder les autorisations: + +
+ +4. La fenêtre pop-up sera fermée automatiquement et les informations d'identification seront enregistrées et prêtes à être utilisées. + +## Utiliser comme outil d'agent + +Plusieurs actions peuvent être sélectionnées pour permettre à l'agent de choisir intelligemment celui approprié. \ +Les paramètres peuvent être laissés vides pour permettre à l'agent de déterminer les valeurs par elle-même. Cependant, si l'utilisateur fournit des valeurs, ceux-ci remplaceront les choix de l'agent. + +
+ +## Utiliser comme nœud d'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. Par exemple, la récupération d'une liste de messages projets avant de passer à l'étape suivante. \ +Dans ce mode, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **. \ +Contrairement au[**Use as Agent Tool**](google-calendar.md#use-as-agent-tool)Option, il n'y a pas d'agent pour déterminer automatiquement les entrées. L'utilisateur doit remplir manuellement les champs, soit en entrant des valeurs fixes, soit en utilisant des variables enfermées dans des supports doubles bouclés`{{ }}`. + +
diff --git a/fr/integrations/langchain/tools/google-custom-search.md b/fr/integrations/langchain/tools/google-custom-search.md new file mode 100644 index 00000000..3562b1ad --- /dev/null +++ b/fr/integrations/langchain/tools/google-custom-search.md @@ -0,0 +1,13 @@ +--- +description: >- + Wrapper around Google Custom Search API - a real-time API to access Google + search results. +--- + +# Recherche personnalisée Google + +

nœud Recherche personnalisée Google

+ +## Créer / utiliser Google Project + +1. Connectez-vous à votre[**Google Cloud**](https://console.cloud.google.com/)compte. +2. Se diriger vers[**Google Cloud Console > APIs & Services**](https://console.cloud.google.com/apis/credentials), et sélectionnez le projet que vous souhaitez utiliser dans la liste déroulante en haut à gauche (ou créez un nouveau projet et sélectionnez-le). +3. Configurez l'écran de consentement ** OAuth ** Si vous n'en avez pas confiuré auparavant. + +
+ +4. Accédez à ** des informations d'identification **, puis cliquez sur ** + Créer des informations d'identification> ID client OAuth **. + +
+ +5. Dans le ** Type d'application ** Dropdown, sélectionnez ** Application Web **. +6. Sous ** URIS REDIRECT AMÉCIRISÉ **, cliquez sur ** + Ajouter URI ** et collez l'URL de redirection OAuth copiée plus tôt. +7. Cliquez sur ** Créer **. + +
+ +8. Copiez l'ID client et le secret du client: + +
+ +9. Dans ** APPATIVE API & SERVICES **, cliquez sur ** + Activer les API et Services **. +10. Recherchez et activez l'API ** Google Drive **. + +
+ +11. Retour à ** Indementiels **, cliquez sur les informations d'identification nouvellement créées sous ** OAuth 2.0 Client IDS **, et sur la page de détail, vous trouverez le ** Client ID ** et ** Client Secret **. + +## Terminer la configuration en flux + +1. Remplissez toutes les valeurs copiées plus tôt. Puis cliquez sur "** Authenticiate **": + +
+ +2. Une fenêtre de connexion Google apparaîtra: + +
+ +3. Accorder les autorisations: + +
+ +4. La fenêtre pop-up sera fermée automatiquement et les informations d'identification seront enregistrées et prêtes à être utilisées. + +## Utiliser comme outil d'agent + +Plusieurs actions peuvent être sélectionnées pour permettre à l'agent de choisir intelligemment celui approprié. \ +Les paramètres peuvent être laissés vides pour permettre à l'agent de déterminer les valeurs par elle-même. Cependant, si l'utilisateur fournit des valeurs, ceux-ci remplaceront les choix de l'agent. + +
+ +## Utiliser comme nœud d'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. Par exemple, obtenez une feuille de calcul spécifique avant de passer à l'étape suivante. \ +Dans ce mode, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **. \ +Contrairement au[**Use as Agent Tool**](google-drive.md#use-as-agent-tool)Option, il n'y a pas d'agent pour déterminer automatiquement les entrées. L'utilisateur doit remplir manuellement les champs, soit en entrant des valeurs fixes, soit en utilisant des variables enfermées dans des supports doubles bouclés`{{ }}`. + +
+ +Exemple de fichier PDF créé dans Google Drive à partir du nœud d'outil ci-dessus: + +
diff --git a/fr/integrations/langchain/tools/google-sheets.md b/fr/integrations/langchain/tools/google-sheets.md new file mode 100644 index 00000000..39405d5e --- /dev/null +++ b/fr/integrations/langchain/tools/google-sheets.md @@ -0,0 +1,72 @@ +# Feuilles Google + +## Créer des informations d'identification dans Flowise + +1. Ajouter un nouvel diplôme Google Sheets OAuth2 +2. Entrez un nom pour les informations d'identification. +3. Copiez l'URL de redirection OAuth. +4. Notez que les champs suivants doivent être remplis: + * ID client + * Secret client + +
+ +## Créer / utiliser Google Project + +1. Connectez-vous à votre[**Google Cloud**](https://console.cloud.google.com/)compte. +2. Se diriger vers[**Google Cloud Console > APIs & Services**](https://console.cloud.google.com/apis/credentials), et sélectionnez le projet que vous souhaitez utiliser dans la liste déroulante en haut à gauche (ou créez un nouveau projet et sélectionnez-le). +3. Configurez l'écran de consentement ** OAuth ** Si vous n'en avez pas confiuré auparavant. + +
+ +4. Accédez à ** des informations d'identification **, puis cliquez sur ** + Créer des informations d'identification> ID client OAuth **. + +
+ +5. Dans le ** Type d'application ** Dropdown, sélectionnez ** Application Web **. +6. Sous ** URIS REDIRECT AMÉCIRISÉ **, cliquez sur ** + Ajouter URI ** et collez l'URL de redirection OAuth copiée plus tôt. +7. Cliquez sur ** Créer **. + +
+ +8. Copiez l'ID client et le secret du client: + +
+ +9. Dans ** APPATIVE API & SERVICES **, cliquez sur ** + Activer les API et Services **. +10. Recherchez et activez l'API ** Google Sheets **. + +
+ +11. Retour à ** Indementiels **, cliquez sur les informations d'identification nouvellement créées sous ** OAuth 2.0 Client IDS **, et sur la page de détail, vous trouverez le ** Client ID ** et ** Client Secret **. + +## Terminer la configuration en flux + +1. Remplissez toutes les valeurs copiées plus tôt. Puis cliquez sur "** Authenticiate **": + +
+ +2. Une fenêtre de connexion Google apparaîtra: + +
+ +3. Accorder les autorisations: + +
+ +4. La fenêtre pop-up sera fermée automatiquement et les informations d'identification seront enregistrées et prêtes à être utilisées. + +## Utiliser comme outil d'agent + +Plusieurs actions peuvent être sélectionnées pour permettre à l'agent de choisir intelligemment celui approprié. \ +Les paramètres peuvent être laissés vides pour permettre à l'agent de déterminer les valeurs par elle-même. Cependant, si l'utilisateur fournit des valeurs, ceux-ci remplaceront les choix de l'agent. + +
+ +## Utiliser comme nœud d'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. Par exemple, obtenez une feuille de calcul spécifique avant de passer à l'étape suivante. \ +Dans ce mode, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **. \ +Contrairement au[**Use as Agent Tool**](google-sheets.md#use-as-agent-tool)Option, il n'y a pas d'agent pour déterminer automatiquement les entrées. L'utilisateur doit remplir manuellement les champs, soit en entrant des valeurs fixes, soit en utilisant des variables enfermées dans des supports doubles bouclés`{{ }}`. + +
diff --git a/fr/integrations/langchain/tools/microsoft-outlook.md b/fr/integrations/langchain/tools/microsoft-outlook.md new file mode 100644 index 00000000..99c9b82c --- /dev/null +++ b/fr/integrations/langchain/tools/microsoft-outlook.md @@ -0,0 +1,78 @@ +# Microsoft Outlook + +## Condition préalable + +Une licence Microsoft 365 valide attribuée à l'utilisateur Azure Active Directory. Référer:[https://learn.microsoft.com/en-us/answers/questions/761931/microsoft-graph-api-throws-the-mailbox-is-either-i](https://learn.microsoft.com/en-us/answers/questions/761931/microsoft-graph-api-throws-the-mailbox-is-either-i) + +## Créer des informations d'identification dans Flowise + +1. Ajouter un nouvel diplôme Microsoft Outlook OAuth2 +2. Entrez un nom pour les informations d'identification. +3. Copiez l'URL de redirection OAuth. +4. Notez que les champs suivants doivent être remplis: + * URL d'autorisation + * URL du jeton d'accès + * ID client + * Secret client + +
+ +## Créez une application Azure + +1. Connectez-vous à votre existant[Azure](https://login.microsoftonline.com/)compte ou[sign up](https://signup.live.com/signup)Si vous ne vous êtes pas déjà inscrit +2. Recherchez ** les inscriptions d'applications **. +3. Ensuite, enregistrez une nouvelle application Azure dans[app registrations](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/CreateApplicationBlade/quickStartType~/null/isMSAApp~/false). + +
+ +4. Sous "Redirection URI (Facultatif)", sélectionnez "Web" et collez votre "URL de redirection OAuth" que vous avez copié plus tôt. + +
+ +5. Après une application créée, accédez à ** Certificats et secrets **> ** Secrets du client ** et cliquez sur le bouton "** Nouveau client Secret **" pour créer un secret client. Copiez le secret à utiliser plus tard. + +
+ +6. Naviguez vers ** Présentation ** et cliquez sur "** Points de terminaison **". Copiez les points de terminaison pour "** OAuth 2.0 Autorisation Endpoint (V2) **" et "** OAuth 2.0 Token Endpoint (V2) **". + +
+ +7. Fermez la fenêtre contextuelle des points de terminaison, copiez l'application ** (client) ID **: + +
+ +## Terminer la configuration en flux + +1. Remplissez toutes les valeurs copiées plus tôt. Puis cliquez sur "** Authenticiate **": + +
+ +2. Une fenêtre Microsoft apparaîtra, sélectionnez le compte. + +{% hint style = "avertissement"%} +L'utilisateur du compte doit avoir une sous-licence Microsoft 365 valide +{% EndHint%} + +
+ +3. Accorder les autorisations requises: + +
+ +4. Le pop-up se fermera automatiquement et les informations d'identification seront enregistrées par la suite. + +## Utiliser comme outil d'agent + +Plusieurs actions peuvent être sélectionnées pour permettre à l'agent de choisir intelligemment celui approprié. \ +Les paramètres peuvent être laissés vides pour permettre à l'agent de déterminer les valeurs par elle-même. Cependant, si l'utilisateur fournit des valeurs, ceux-ci remplaceront les choix de l'agent. + +
+ +## Utiliser comme nœud d'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. Par exemple, la récupération d'une liste des messages Outlook avant de passer à l'étape suivante. \ +Dans ce mode, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **. \ +Contrairement au[**Use as Agent Tool**](microsoft-outlook.md#use-as-agent-tool)Option, il n'y a pas d'agent pour déterminer automatiquement les entrées. L'utilisateur doit remplir manuellement les champs, soit en entrant des valeurs fixes, soit en utilisant des variables enfermées dans des supports doubles bouclés`{{ }}`. + +
+ diff --git a/fr/integrations/langchain/tools/microsoft-teams.md b/fr/integrations/langchain/tools/microsoft-teams.md new file mode 100644 index 00000000..68adee10 --- /dev/null +++ b/fr/integrations/langchain/tools/microsoft-teams.md @@ -0,0 +1,77 @@ +# Microsoft Teams + +## Condition préalable + +Une licence Microsoft 365 valide attribuée à l'utilisateur Azure Active Directory. Référer:[https://learn.microsoft.com/en-us/answers/questions/761931/microsoft-graph-api-throws-the-mailbox-is-either-i](https://learn.microsoft.com/en-us/answers/questions/761931/microsoft-graph-api-throws-the-mailbox-is-either-i) + +## Créer des informations d'identification dans Flowise + +1. Ajouter un nouvel diplôme Microsoft Teams OAuth2 +2. Entrez un nom pour les informations d'identification. +3. Copiez l'URL de redirection OAuth. +4. Notez que les champs suivants doivent être remplis: + * URL d'autorisation + * URL du jeton d'accès + * ID client + * Secret client + +
+ +## Créez une application Azure + +1. Connectez-vous à votre existant[Azure](https://login.microsoftonline.com/)compte ou[sign up](https://signup.live.com/signup)Si vous ne vous êtes pas déjà inscrit +2. Recherchez ** les inscriptions d'applications **. +3. Ensuite, enregistrez une nouvelle application Azure dans[app registrations](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/CreateApplicationBlade/quickStartType~/null/isMSAApp~/false). + +
+ +4. Sous "Redirection URI (Facultatif)", sélectionnez "Web" et collez votre "URL de redirection OAuth" que vous avez copié plus tôt. + +
+ +5. Après une application créée, accédez à ** Certificats et secrets **> ** Secrets du client ** et cliquez sur le bouton "** Nouveau client Secret **" pour créer un secret client. Copiez le secret à utiliser plus tard. + +
+ +6. Naviguez vers ** Présentation ** et cliquez sur "** Points de terminaison **". Copiez les points de terminaison pour "** OAuth 2.0 Autorisation Endpoint (V2) **" et "** OAuth 2.0 Token Endpoint (V2) **". + +
+ +7. Fermez la fenêtre contextuelle des points de terminaison, copiez l'application ** (client) ID **: + +
+ +## Terminer la configuration en flux + +1. Remplissez toutes les valeurs copiées plus tôt. Puis cliquez sur "** Authenticiate **": + +
+ +2. Une fenêtre Microsoft apparaîtra, sélectionnez le compte. + +{% hint style = "avertissement"%} +L'utilisateur du compte doit avoir une sous-licence Microsoft 365 valide +{% EndHint%} + +
+ +3. Accorder les autorisations requises: + +
+ +4. Le pop-up se fermera automatiquement et les informations d'identification seront enregistrées par la suite. + +## Utiliser comme outil d'agent + +Plusieurs actions peuvent être sélectionnées pour permettre à l'agent de choisir intelligemment celui approprié. \ +Les paramètres peuvent être laissés vides pour permettre à l'agent de déterminer les valeurs par elle-même. Cependant, si l'utilisateur fournit des valeurs, ceux-ci remplaceront les choix de l'agent. + +
+ +## Utiliser comme nœud d'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. Par exemple, récupérer une liste des messages d'équipe avant de passer à l'étape suivante. \ +Dans ce mode, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **. \ +Contrairement au[**Use as Agent Tool**](microsoft-teams.md#use-as-agent-tool)Option, il n'y a pas d'agent pour déterminer automatiquement les entrées. L'utilisateur doit remplir manuellement les champs, soit en entrant des valeurs fixes, soit en utilisant des variables enfermées dans des supports doubles bouclés`{{ }}`. + +
diff --git a/fr/integrations/langchain/tools/openapi-toolkit.md b/fr/integrations/langchain/tools/openapi-toolkit.md new file mode 100644 index 00000000..76d4c91f --- /dev/null +++ b/fr/integrations/langchain/tools/openapi-toolkit.md @@ -0,0 +1,11 @@ +--- +description: Load OpenAPI specification. +--- + +# Boîte à outils OpenAPI + +

nœud Boîte à outils OpenAPI

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/python-interpreter.md b/fr/integrations/langchain/tools/python-interpreter.md new file mode 100644 index 00000000..c1f2e1ee --- /dev/null +++ b/fr/integrations/langchain/tools/python-interpreter.md @@ -0,0 +1,5 @@ +# Interprète de code par E2B + +[E2B](https://e2b.dev/)est un runtime open-source pour l'exécution du code généré par AI dans Secure Cloud Sandbox. Par exemple, lorsque l'utilisateur demande à générer un graphique à barres des données, LLM publiera le code Python nécessaire pour tracer le graphique. Ce code généré sera envoyé à E2B, et la sortie de l'exécution contient des images du graphique, des codes, du texte, etc. + +
diff --git a/fr/integrations/langchain/tools/read-file.md b/fr/integrations/langchain/tools/read-file.md new file mode 100644 index 00000000..b6ad8290 --- /dev/null +++ b/fr/integrations/langchain/tools/read-file.md @@ -0,0 +1,11 @@ +--- +description: Read file from disk. +--- + +# Lire le fichier + +

nœud Lire le fichier

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/request-get.md b/fr/integrations/langchain/tools/request-get.md new file mode 100644 index 00000000..e5924159 --- /dev/null +++ b/fr/integrations/langchain/tools/request-get.md @@ -0,0 +1,11 @@ +--- +description: Execute HTTP GET requests. +--- + +# Demander obtenir + +

nœud Demander obtenir

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/request-post.md b/fr/integrations/langchain/tools/request-post.md new file mode 100644 index 00000000..87919c65 --- /dev/null +++ b/fr/integrations/langchain/tools/request-post.md @@ -0,0 +1,11 @@ +--- +description: Execute HTTP POST requests. +--- + +# Demander la publication + +

nœud Demander la publication

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/retriever-tool.md b/fr/integrations/langchain/tools/retriever-tool.md new file mode 100644 index 00000000..b4ee1ce5 --- /dev/null +++ b/fr/integrations/langchain/tools/retriever-tool.md @@ -0,0 +1,11 @@ +--- +description: Use a retriever as allowed tool for agent. +--- + +# Outil de récupération + +

nœud Outil de récupération

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/searchapi.md b/fr/integrations/langchain/tools/searchapi.md new file mode 100644 index 00000000..465a4fe5 --- /dev/null +++ b/fr/integrations/langchain/tools/searchapi.md @@ -0,0 +1,11 @@ +--- +description: Real-time API for accessing Google Search data. +--- + +# Searchapi + +

SearchAPI Node + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/searxng.md b/fr/integrations/langchain/tools/searxng.md new file mode 100644 index 00000000..13236137 --- /dev/null +++ b/fr/integrations/langchain/tools/searxng.md @@ -0,0 +1,34 @@ +--- +description: Wrapper around SearXNG - a free internet metasearch engine. +--- + +# Searxng + +

nœud SearXNG

+ +### Configuration de SearXng + +Suivre[official documentation](https://docs.searxng.org/admin/installation.html)pour configurer SearXng localement. Dans ce cas, nous utiliserons Docker Compose pour le configurer. + +Se diriger vers[searxng-docker](https://github.com/searxng/searxng-docker)Référentiel et suivez les instructions de configuration. + +Assurez-vous que vous avez`server.limiter`se mettre à`false`et`json`est inclus dans`search.formats`. Ces paramètres peuvent être définis dans`searxng/settings.yml` : + +```yaml +server: + limiter: false +general: + debug: true +search: + formats: + - html + - json +``` + +`docker-compose up -d`Pour démarrer le conteneur. Ouvrez le navigateur Web et accédez à ** http: // localhost: 8080 / search **, vous pourrez voir la page SearXng. + +### Utilisation de Flowise + +Faites glisser et déposez le nœud SearXng sur le toile. Remplissez l'URL de base comme ** http: // localhost: 8080. ** Vous pouvez également spécifier d'autres paramètres de recherche si nécessaire. LLM déterminera automatiquement ce qu'il faut utiliser pour la question de la requête de recherche. + +
diff --git a/fr/integrations/langchain/tools/serp-api.md b/fr/integrations/langchain/tools/serp-api.md new file mode 100644 index 00000000..f77d57e6 --- /dev/null +++ b/fr/integrations/langchain/tools/serp-api.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around SerpAPI - a real-time API to access Google search results. +--- + +# API SERP + +
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/serper.md b/fr/integrations/langchain/tools/serper.md new file mode 100644 index 00000000..63b3fc58 --- /dev/null +++ b/fr/integrations/langchain/tools/serper.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around Serper.dev - Google Search API. +--- + +# Serper + +

nœud Serper

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/tavily-ai.md b/fr/integrations/langchain/tools/tavily-ai.md new file mode 100644 index 00000000..750636d4 --- /dev/null +++ b/fr/integrations/langchain/tools/tavily-ai.md @@ -0,0 +1,19 @@ +--- +description: Wrapper around TavilyAI API - real-time, accurate search results tailored for LLMs and RAG. +--- + +# Tavilyai + +

nœud Tavily

+ +## Installation + +1. Pour ajouter le nœud API Tavily. Cliquez sur le bouton Ajouter des nœuds, ** Langchain **> ** Outils **> ** API Tavily ** + +2. Créez des informations d'identification pour Tavily. Reportez-vous au[official guide](https://docs.tavily.com/guides/quickstart)sur la façon d'obtenir la clé API Tavily. + +
+ +
+ +3. Vous pouvez désormais connecter ce nœud à n'importe quel nœud qui accepte les entrées d'outil pour obtenir des résultats de recherche en temps réel. diff --git a/fr/integrations/langchain/tools/web-browser.md b/fr/integrations/langchain/tools/web-browser.md new file mode 100644 index 00000000..8c263b66 --- /dev/null +++ b/fr/integrations/langchain/tools/web-browser.md @@ -0,0 +1,11 @@ +--- +description: Gives agent the ability to visit a website and extract information. +--- + +# Navigateur Web + +

nœud Navigateur Web

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/tools/write-file.md b/fr/integrations/langchain/tools/write-file.md new file mode 100644 index 00000000..af1501e5 --- /dev/null +++ b/fr/integrations/langchain/tools/write-file.md @@ -0,0 +1,11 @@ +--- +description: Write file to disk. +--- + +# Écrire un fichier + +

nœud Écrire un fichier

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/README.md b/fr/integrations/langchain/vector-stores/README.md new file mode 100644 index 00000000..51d5f6a8 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/README.md @@ -0,0 +1,36 @@ +--- +description: LangChain Vector Store Nodes +--- + +# Magasins vectoriels + +*** + +Un magasin vectoriel ou une base de données vectorielle fait référence à un type de système de base de données spécialisée dans le stockage et la récupération des vecteurs numériques de haute dimension. Les magasins vectoriels sont conçus pour gérer et indexer efficacement ces vecteurs, permettant des recherches de similitude rapides. + +### Regardez une introduction sur les magasins vectoriels et comment vous pouvez l'utiliser sur Flowise + +{% embed url = "https://youtu.be/m0nr1_pnaxc"%} + +### Nœuds de magasin vectoriel: + +* [AstraDB](astradb.md) +* [Chroma](chroma.md) +* [Couchbase](couchbase.md) +* [Elastic](elastic.md) +* [Faiss](faiss.md) +* [In-Memory Vector Store](in-memory-vector-store.md) +* [Milvus](milvus.md) +* [MongoDB Atlas](mongodb-atlas.md) +* [OpenSearch](opensearch.md) +* [Pinecone](pinecone.md) +* [Postgres](postgres.md) +* [Qdrant](qdrant.md) +* [Redis](redis.md) +* [SingleStore](singlestore.md) +* [Supabase](supabase.md) +* [Upstash Vector](upstash-vector.md) +* [Vectara](vectara.md) +* [Weaviate](weaviate.md) +* [Zep Collection - Open Source](zep-collection-open-source.md) +* [Zep Collection - Cloud](zep-collection-cloud.md) diff --git a/fr/integrations/langchain/vector-stores/astradb.md b/fr/integrations/langchain/vector-stores/astradb.md new file mode 100644 index 00000000..18c5af1c --- /dev/null +++ b/fr/integrations/langchain/vector-stores/astradb.md @@ -0,0 +1,40 @@ +# 
Astradb + +## Installation + +1. Enregistrer un compte sur[AstraDB](https://astra.datastax.com/) +2. Connectez-vous vers le portail. Créer une base de données + +
+ +3. Choisissez Serverless (vecteur), remplissez le nom de la base de données, le fournisseur et la région + +
+ +4. Une fois la base de données configurée, saisissez le point de terminaison de l'API et générez un jeton d'application + +
+ +5. Créez une nouvelle collection, sélectionnez la métrique de dimension et de similitude souhaitée: + +
+ +6. Retour à la toile fluide, glisser et déposer le nœud Astra. Cliquez sur ** Créer un nouveau ** à partir de la liste déroulante des informations d'identification: + +
+ +7. Spécifiez le point de terminaison de l'API et le jeton d'application: + +
+ +8. Vous pouvez maintenant augmenter les données à Astradb + +
+ +9. Revenez au portail Astra et à votre collection, vous pourrez voir toutes les données qui ont été renversées: + +
+ +10. Commencez à interroger! + +
diff --git a/fr/integrations/langchain/vector-stores/chroma.md b/fr/integrations/langchain/vector-stores/chroma.md new file mode 100644 index 00000000..31a88b34 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/chroma.md @@ -0,0 +1,84 @@ +# Chrome + +## Condition préalable + +Vous avez besoin d'un serveur Chroma. Tu peux: + +1. Installez Chroma CLI et exécutez le serveur en utilisant`chroma run` +2. S'inscrire à[Chroma Cloud](https://trychroma.com/home). +3. Déployez votre propre instance chroma dans[Docker](https://docs.trychroma.com/guides/deploy/docker). + +## Installation + +| Entrée | Description | Par défaut | Cloud | +| --------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ----- | +| Document | Peut être connecté avec les nœuds de[Document Loader](../document-loaders/) | | | +| Intégres | Peut être connecté avec les nœuds de[Embeddings](../embeddings/) | | | +| Nom de la collection | Nom de la collection de chroma. Se référer à[here](https://docs.trychroma.com/usage-guide#creating-inspecting-and-deleting-collections)Pour la convention de dénomination | | | +| URL de chroma | Spécifiez l'URL de votre instance de chroma | http: // localhost: 8000 | https://api.trychroma.com:8000 | + +Pour Chroma Cloud, vous devrez obtenir votre identifiant de locataire et créer votre base de données et votre clé API. + +
+ +### Supplémentaire + +Si vous exécutez à la fois Flowise et Chrom sur Docker, des étapes supplémentaires sont impliquées. + +1. Tournez d'abord Chroma Docker + +```bash +docker compose up -d --build +``` + +2. Ouvrir`docker-compose.yml`en flux + +```bash +cd Flowise && cd docker +``` + +3. Modifiez le fichier en: + +```sh +version: '3.1' + +services: + flowise: + image: flowiseai/flowise + restart: always + environment: + - PORT=${PORT} + - DEBUG=${DEBUG} + - DATABASE_PATH=${DATABASE_PATH} + - SECRETKEY_PATH=${SECRETKEY_PATH} + - FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE} + - LOG_PATH=${LOG_PATH} + - LOG_LEVEL=${LOG_LEVEL} + - EXECUTION_MODE=${EXECUTION_MODE} + ports: + - '${PORT}:${PORT}' + volumes: + - ~/.flowise:/root/.flowise + networks: + - flowise_net + command: /bin/sh -c "sleep 3; flowise start" +networks: + flowise_net: + name: chroma_net + external: true +``` + +4. Spin up Flowise Docker Image + +```bash +docker compose up -d +``` + +5. Sur l'URL chroma, pour les systèmes d'exploitation Windows et MacOS spécifiez[http://host.docker.internal:8000](http://host.docker.internal:8000/). Pour les systèmes basés sur Linux, la passerelle Docker par défaut doit être utilisée car host.docker.internal n'est pas disponible:[http://172.17.0.1:8000](http://172.17.0.1:8000/) + +
+ +## Ressources + +* [LangChain JS Chroma](https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/chroma) +* [Chroma Getting Started](https://docs.trychroma.com/getting-started) diff --git a/fr/integrations/langchain/vector-stores/couchbase.md b/fr/integrations/langchain/vector-stores/couchbase.md new file mode 100644 index 00000000..7d1e3a30 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/couchbase.md @@ -0,0 +1,158 @@ +--- +description: >- + Upsert embedded data and perform vector search upon query using Couchbase, a + NoSQL cloud developer data platform for critical, AI-powered applications. +--- + +# Canapé + +## Condition préalable + +### Exigences + +1. Couchbase Cluster (Version auto-gérée ou capella) ** 7.6 + ** avec[Search Service](https://docs.couchbase.com/server/current/search/search.html). +2. Configuration de Capella: Pour en savoir plus sur la connexion à votre cluster Capella, veuillez suivre le[instructions](https://docs.couchbase.com/cloud/get-started/connect.html?_gl=1*1yhpmel*_gcl_au*MTMzNDE3NTQxLjE3MzY5MjA5MzQ.). + +Plus précisément, vous devez faire ce qui suit: + + * Créer le[database credentials](https://docs.couchbase.com/cloud/clusters/manage-database-users.html?_gl=1*19zk7vq*_gcl_au*MTMzNDE3NTQxLjE3MzY5MjA5MzQ.)pour accéder au cluster. + * [Allow access](https://docs.couchbase.com/cloud/clusters/allow-ip-address.html?_gl=1*19zk7vq*_gcl_au*MTMzNDE3NTQxLjE3MzY5MjA5MzQ.)au cluster à partir de l'IP sur lequel l'application est en cours d'exécution. + +Configuration auto-gérée: + + * Suivre[Couchbase Installation Options](https://developer.couchbase.com/tutorial-couchbase-installation-options)Pour l'installation de la dernière instance de serveur de base de données CouchBase. Assurez-vous d'ajouter le service de recherche. +3. Recherchez la création d'index sur le service de texte intégral dans Couchbase. 
+ +### Index d'importation de recherche + +#### [Couchbase Capella](\(https:/docs.couchbase.com/cloud/search/import-search-index.html) + +Suivez ces étapes pour importer un index de recherche dans Capella: + +* Copiez la définition d'index dans un nouveau fichier nommé`index.json`. +* Importez le fichier dans Capella suivant les instructions de la documentation. +* Cliquez sur Créer l'index pour finaliser la création d'index. + +#### [Couchbase Server](\(https:/docs.couchbase.com/server/current/search/import-search-index.html) + +Suivez ces étapes pour le serveur CouchBase: + +* Naviguez vers la recherche → Ajouter un index → ​​l'importation. +* Copiez la définition d'index fournie dans l'écran d'importation. +* Cliquez sur Créer l'index pour finaliser la création d'index. + +Vous pouvez également créer un index vectoriel à l'aide de l'interface utilisateur de recherche sur les deux[Couchbase Capella](https://docs.couchbase.com/cloud/vector-search/create-vector-search-index-ui.html?_gl=1*1rglcpj*_gcl_au*MTMzNDE3NTQxLjE3MzY5MjA5MzQ.)et[Couchbase Self Managed Server](https://docs.couchbase.com/server/current/vector-search/create-vector-search-index-ui.html?_gl=1*t7aeet*_gcl_au*MTMzNDE3NTQxLjE3MzY5MjA5MzQ.). + +### Définition d'index + +Ici, nous créons l'index`vector-index`sur les documents. Le champ vectoriel est défini sur`embedding`avec 1536 dimensions et le champ de texte réglé sur`text`. Nous indexons et stockons également tous les champs sous`metadata`dans le document en tant que mappage dynamique pour tenir compte des structures de documents variables. La métrique de similitude est définie sur`dot_product`. S'il y a un changement dans ces paramètres, veuillez adapter l'index en conséquence. 
+ +```json +{ + "name": "vector-index", + "type": "fulltext-index", + "params": { + "doc_config": { + "docid_prefix_delim": "", + "docid_regexp": "", + "mode": "scope.collection.type_field", + "type_field": "type" + }, + "mapping": { + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "default_mapping": { + "dynamic": true, + "enabled": false + }, + "default_type": "_default", + "docvalues_dynamic": false, + "index_dynamic": true, + "store_dynamic": false, + "type_field": "_type", + "types": { + "_default._default": { + "dynamic": true, + "enabled": true, + "properties": { + "embedding": { + "enabled": true, + "dynamic": false, + "fields": [ + { + "dims": 1536, + "index": true, + "name": "embedding", + "similarity": "dot_product", + "type": "vector", + "vector_index_optimized_for": "recall" + } + ] + }, + "metadata": { + "dynamic": true, + "enabled": true + }, + "text": { + "enabled": true, + "dynamic": false, + "fields": [ + { + "index": true, + "name": "text", + "store": true, + "type": "text" + } + ] + } + } + } + } + }, + "store": { + "indexType": "scorch", + "segmentVersion": 16 + } + }, + "sourceType": "gocbcore", + "sourceName": "pdf-chat", + "sourceParams": {}, + "planParams": { + "maxPartitionsPerPIndex": 64, + "indexPartitions": 16, + "numReplicas": 0 + } +} + +``` + +## Installation + +1. Ajoutez un nouveau nœud ** Couchbase ** sur toile et remplissez le nom du seau, le nom de la portée, le nom de la collection et le nom d'index + +
+ +2. Ajoutez de nouveaux informations d'identification et remplissez les paramètres: + * Chaîne de connexion Couchbase + * Nom d'utilisateur en grappes + * Mot de passe du cluster + + + +3. Ajouter des nœuds supplémentaires à la toile et démarrer le processus ussert + * ** Document ** peut être connecté à n'importe quel nœud sous[**Document Loader**](../document-loaders/)catégorie + * ** Embeddings ** peut être connecté à n'importe quel nœud sous[**Embeddings** ](../embeddings/)catégorie + +
+ + + +5. Vérifiez de l'interface utilisateur Couchbase pour voir si les données ont été renversées avec succès! + +## Ressources + +* Intégrations Langchain Couchbase Vectorstore + * [Python](https://python.langchain.com/docs/integrations/vectorstores/couchbase/) + * [NodeJS](https://js.langchain.com/docs/integrations/vectorstores/couchbase/) +* Reportez-vous au[Couchbase Documentation](https://docs.couchbase.com/home/index.html)Pour en savoir plus sur Couchbase. diff --git a/fr/integrations/langchain/vector-stores/elastic.md b/fr/integrations/langchain/vector-stores/elastic.md new file mode 100644 index 00000000..757a9216 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/elastic.md @@ -0,0 +1,69 @@ +# Élastique + +## Condition préalable + +1. Vous pouvez utiliser le[official Docker image](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html)pour commencer, ou vous pouvez utiliser[Elastic Cloud](https://www.elastic.co/cloud/), Service cloud officiel d'Elastic. Dans ce guide, nous utiliserons la version cloud. +2. [Register](https://cloud.elastic.co/registration)un compte ou[login](https://cloud.elastic.co/login)avec le compte existant sur Elastic Cloud. + +
+ +3. Cliquez sur ** Créer un déploiement **. Ensuite, nommez votre déploiement et choisissez le fournisseur. + +
+ +4. Une fois le déploiement terminé, vous devriez être en mesure de voir les guides de configuration comme indiqué ci-dessous. Cliquez sur l'option ** Configuration de la recherche vectorielle **. + +
+ +5. Vous devriez maintenant voir la page ** de démarrage ** pour ** Vector Search **. + +
+ +6. Sur la barre gauche, cliquez sur ** Indices **. Ensuite, ** Créez un nouvel index **. + +
+ +7. Sélectionnez ** API ** Méthode d'ingestion + +
+ +8. Nommez votre nom d'index de recherche, puis ** Créer un index ** + +
+ +9. Une fois l'index créé, générez une nouvelle clé API, prenez note à la fois de la clé API générée et de l'URL + +
+ +2. Ajouter de nouveaux informations d'identification via ** API Elasticsearch ** + +
+ +3. Prenez la clé URL et API d'Elasticsearch, remplissez les champs + +
+ +4. Une fois les informations d'identification créées avec succès, vous pouvez démarrer les données + +
+ +
+ +5. Une fois que les données ont été renversées avec succès, vous pouvez la vérifier à partir du tableau de bord élastique: + +
+ +## Ressources + +* [LangChain JS Elastic](https://js.langchain.com/docs/integrations/vectorstores/elasticsearch) +* [Vector Search (kNN) Implementation Guide - API Edition](https://www.elastic.co/search-labs/blog/articles/vector-search-implementation-guide-api-edition) diff --git a/fr/integrations/langchain/vector-stores/faiss.md b/fr/integrations/langchain/vector-stores/faiss.md new file mode 100644 index 00000000..0e8647c2 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/faiss.md @@ -0,0 +1,13 @@ +--- +description: >- + Upsert embedded data and perform similarity search upon query using Faiss + library from Meta. +--- + +# Fais + +

nœud Faiss

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/in-memory-vector-store.md b/fr/integrations/langchain/vector-stores/in-memory-vector-store.md new file mode 100644 index 00000000..25a0364d --- /dev/null +++ b/fr/integrations/langchain/vector-stores/in-memory-vector-store.md @@ -0,0 +1,13 @@ +--- +description: >- + In-memory vectorstore that stores embeddings and does an exact, linear search + for the most similar embeddings. +--- + +# Magasin vectoriel en mémoire + +

nœud In-Memory Vector Store

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/milvus.md b/fr/integrations/langchain/vector-stores/milvus.md new file mode 100644 index 00000000..3bda25d7 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/milvus.md @@ -0,0 +1,13 @@ +--- +description: >- + Upsert embedded data and perform similarity search upon query using Milvus, + world's most advanced open-source vector database. +--- + +# Milvus + +

nœud Milvus

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/mongodb-atlas.md b/fr/integrations/langchain/vector-stores/mongodb-atlas.md new file mode 100644 index 00000000..75d5fde5 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/mongodb-atlas.md @@ -0,0 +1,67 @@ +--- +description: >- + Upsert embedded data and perform similarity or mmr search upon query using + MongoDB Atlas, a managed cloud mongodb database. +--- + +# MongoDB Atlas + +

nœud MongoDB Atlas

+ +### Configuration de la cluster[​](https://js.langchain.com/docs/integrations/vectorstores/mongodb_atlas/#initial-cluster-configuration) + +Pour configurer un cluster MongoDB Atlas, allez au[MongoDB Atlas ](https://www.mongodb.com/)Site Web et inscrivez-vous si vous n'avez pas de compte. Lorsque vous y êtes invité, créez et nommez votre cluster, qui apparaîtra dans la section de la base de données. Ensuite, sélectionnez "** Browse Collections **" pour créer une nouvelle collection ou en utiliser une à partir des exemples de données fournies. + +{% hint style = "avertissement"%} +Assurez-vous que le cluster que vous créez est la version 7.0 ou supérieure. +{% EndHint%} + +### Création d'index + +Après avoir configuré votre cluster, l'étape suivante consiste à créer un index pour le champ de collection que vous avez l'intention de rechercher. + +1. Accédez à l'onglet ** Recherche ** ATLAS ** et cliquez sur ** Créer un index de recherche **. +2. SELECT ** ATLAS VECTOR Recherche - JSON Editor **, choisissez la base de données et la collection appropriées, puis collez ce qui suit dans la zone de texte: + +```json +{ + "fields": [ + { + "numDimensions": 1536, + "path": "embedding", + "similarity": "euclidean", + "type": "vector" + } + ] +} +``` + +Assurez-vous que le`numDimensions`La propriété correspond à la dimensionnalité des intérêts que vous utilisez. Par exemple, les incorporations de cohére ont généralement 1024 dimensions, tandis que les incorporations OpenAI ont 1536 par défaut. + +** Remarque: ** Le magasin vectoriel attend certaines valeurs par défaut, telles que: + +* Un nom d'index de`default` +* Un nom de champ de collection de`embedding` +* Un nom de champ de texte brut de`text` + +Assurez-vous d'initialiser le magasin vectoriel avec des noms de champ qui correspondent à votre schéma d'index et de collecte, comme indiqué dans l'exemple ci-dessus. + +Une fois cela fait, procédez pour construire l'index. 
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} + +### Configuration de flux + +Faites glisser et déposez le MongoDB Atlas Vector Store et ajoutez un nouvel diplôme. Utilisez la chaîne de connexion fournie à partir du tableau de bord MongoDB ATLAS: + +
+ +Remplissez le reste des champs: + +
+ +Vous pouvez également configurer plus de détails à partir de paramètres supplémentaires: + +
diff --git a/fr/integrations/langchain/vector-stores/opensearch.md b/fr/integrations/langchain/vector-stores/opensearch.md new file mode 100644 index 00000000..c84d1a1b --- /dev/null +++ b/fr/integrations/langchain/vector-stores/opensearch.md @@ -0,0 +1,13 @@ +--- +description: >- + Upsert embedded data and perform similarity search upon query using + OpenSearch, an open-source, all-in-one vector database. +--- + +# Opeensearch + +

nœud OpenSearch

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/pinecone.md b/fr/integrations/langchain/vector-stores/pinecone.md new file mode 100644 index 00000000..94e1eb0c --- /dev/null +++ b/fr/integrations/langchain/vector-stores/pinecone.md @@ -0,0 +1,61 @@ +--- +description: >- + Upsert embedded data and perform similarity search upon query using Pinecone, + a leading fully managed hosted vector database. +--- + +# Pignon + +## Condition préalable + +1. Enregistrer un compte pour[Pinecone](https://app.pinecone.io/) +2. Cliquez sur ** Créer un index ** + +
+ +3. Remplissez les champs requis: + - ** Nom de l'index **, nom de l'index à créer. (par exemple, "test fluide") + - ** Dimensions **, taille des vecteurs à insérer dans l'index. (par exemple 1536) + +
+ +4. Cliquez sur ** Créer un index ** + +## Installation + +1. Obtenez / créez votre ** clé API ** + +
+ +2. Ajoutez un nouveau nœud ** Pinecone ** à Canvas et remplissez les paramètres: + - Index de pince + - Espace de noms PineCone (facultatif) + +
+ +3. Créer un nouvel identifiant PineCone -> Remplissez ** Clé API ** + +
+ +4. Ajouter des nœuds supplémentaires à la toile et démarrer le processus ussert + - ** Document ** peut être connecté à n'importe quel nœud sous[**Document Loader**](../document-loaders/)catégorie + - ** Embeddings ** peut être connecté à n'importe quel nœud sous[**Embeddings** ](../embeddings/)catégorie + +
+ +
+ +5. Vérifier[Pinecone dashboard](https://app.pinecone.io)Pour voir si les données ont été renversées avec succès: + +
+ +6. + +## Ressources + +- Intégrations Langchain Pinecone Vectorstore + - [Python](https://python.langchain.com/v0.2/docs/integrations/providers/pinecone/) + - [NodeJS](https://js.langchain.com/v0.2/docs/integrations/vectorstores/pinecone) +- [Pinecone LangChain integration](https://docs.pinecone.io/integrations/langchain) +- [Pinecone Flowise integration](https://docs.pinecone.io/integrations/flowise) +- [Pinecone official clients](https://docs.pinecone.io/reference/pinecone-clients) diff --git a/fr/integrations/langchain/vector-stores/postgres.md b/fr/integrations/langchain/vector-stores/postgres.md new file mode 100644 index 00000000..7b87b756 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/postgres.md @@ -0,0 +1,46 @@ +--- +description: >- + Upsert embedded data and perform similarity search upon query using pgvector + on Postgres. +--- + +# Postgres + +

nœud Postgres

+ +Il existe plusieurs méthodes pour se connecter à Postgres en fonction de la configuration de votre instance. Vous trouverez ci-dessous un exemple de configuration locale à l'aide d'une image Docker prédéfinie fournie par l'équipe PGVector. + +Créer un fichier nommé`docker-compose.yml`avec le contenu ci-dessous: + +```yaml +# Run this command to start the database: +# docker-compose up --build +version: "3" +services: + db: + hostname: 127.0.0.1 + image: pgvector/pgvector:pg16 + ports: + - 5432:5432 + restart: always + environment: + - POSTGRES_DB=api + - POSTGRES_USER=myuser + - POSTGRES_PASSWORD=ChangeMe + volumes: + - ./init.sql:/docker-entrypoint-initdb.d/init.sql +``` + +`docker compose up`Pour démarrer le conteneur Postgres. + +Créez de nouvelles informations d'identification avec l'utilisateur et le mot de passe configurés: + +
+ +Remplissez le champ du nœud avec des valeurs configurées dans`docker-compose.yml`. Par exemple: + +* Hôte: ** LocalHost ** +* Base de données: ** API ** +* Port: ** 5432 ** + +Le tour est joué! Vous avez désormais réussi à configurer le vecteur postgres prêt à être utilisé. diff --git a/fr/integrations/langchain/vector-stores/qdrant.md b/fr/integrations/langchain/vector-stores/qdrant.md new file mode 100644 index 00000000..55abf31c --- /dev/null +++ b/fr/integrations/langchain/vector-stores/qdrant.md @@ -0,0 +1,75 @@ +# Qdrant + +## Condition préalable + +UN[locally running instance of Qdrant](https://qdrant.tech/documentation/quick-start/)ou une instance de cloud Qdrant. + +Pour obtenir une instance de cloud Qdrant: + +1. Dirigez-vous vers la section des grappes du[Cloud Dashboard](https://cloud.qdrant.io/overview). +2. Sélectionnez ** Clusters ** puis cliquez sur ** + Créer **. + +
+ +3. Choisissez vos configurations de cluster et votre région. +4. Appuyez sur ** Créer ** pour provisionner votre cluster. + +## Installation + +1. Obtenez / créez votre ** clé API ** à partir de la section ** Contrôle d'accès aux données ** de la[Cloud Dashboard](https://cloud.qdrant.io/overview). +2. Ajoutez un nouveau nœud ** QDRANT ** sur Canvas. +3. Créez de nouveaux informations d'identification QDRANT à l'aide de la clé API + +
+ +4. Entrez les informations requises dans le nœud ** qdrant **: + * URL du serveur Qdrant + * Nom de collection + +
+ +5. ** Document ** L'entrée peut être connectée à n'importe quel nœud sous[**Document Loader**](../document-loaders/)catégorie. +6. ** Embeddings ** L'entrée peut être connectée à n'importe quel nœud sous[**Embeddings**](../embeddings/)catégorie. + +## Filtration + +Disons que vous avez différents documents renversés, chacun spécifié avec une valeur unique sous la clé de métadonnées`{source}` + +
+ +
+ + + +
+ +
+ +Ensuite, vous voulez filtrer par cela. Qdrant prend en charge la suite[syntax](https://qdrant.tech/documentation/concepts/filtering/#nested-key)En ce qui concerne le filtrage: + +** ui ** + + + +** API ** + +```json +"overrideConfig": { + "qdrantFilter": { + "should": [ + { + "key": "metadata.source", + "match": { + "value": "apple" + } + } + ] + } +} +``` + +## Ressources + +* [Qdrant documentation](https://qdrant.tech/documentation/) +* [LangChain JS Qdrant](https://js.langchain.com/docs/integrations/vectorstores/qdrant) +* [Qdrant Filter](https://qdrant.tech/documentation/concepts/filtering/#nested-key) diff --git a/fr/integrations/langchain/vector-stores/redis.md b/fr/integrations/langchain/vector-stores/redis.md new file mode 100644 index 00000000..5b1a967a --- /dev/null +++ b/fr/integrations/langchain/vector-stores/redis.md @@ -0,0 +1,34 @@ +# Redis + +## Condition préalable + +1. Tourner un[Redis-Stack Server](https://redis.io/docs/latest/operate/oss_and_stack/install/install-stack/docker/)Utilisation de Docker + +```bash +docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest +``` + +## Installation + +1. Ajoutez un nouveau nœud ** redis ** sur toile. +2. Créez de nouveaux informations d'identification Redis. + +
+ +3. Sélectionnez le type d'identification Redis. Choisissez l'API Redis si vous avez un nom d'utilisateur et un mot de passe, sinon redis URL: + +
+ +4. Remplissez l'URL: + +
+ +5. Vous pouvez maintenant démarrer des données avec Redis: + +
+ +6. Accédez au portail Redis Insight et à votre base de données, vous pourrez voir toutes les données qui ont été renversées: + +
diff --git a/fr/integrations/langchain/vector-stores/singlestore.md b/fr/integrations/langchain/vector-stores/singlestore.md new file mode 100644 index 00000000..82b98b81 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/singlestore.md @@ -0,0 +1,62 @@ +# Singlestore + +## Installation + +1. Enregistrer un compte sur[SingleStore](https://www.singlestore.com/) +2. Connectez-vous vers le portail. Sur le panneau gauche, cliquez sur ** Cloud ** -> ** Créer un nouveau groupe d'espace de travail. ** puis cliquez sur ** Création du bouton Workspace **. + +
+ +3. Sélectionnez le fournisseur de cloud et la région de données, puis cliquez sur ** Suivant **: + +
+ +4. Revoir et cliquer sur ** Créer un espace de travail **: + +
+ +5. Vous devriez maintenant voir votre espace de travail créé: + +
+ +6. Procéder à la création d'une base de données + +
+ +7. Vous devriez pouvoir voir votre base de données créée et attachée à l'espace de travail: + +
+ +8. Cliquez sur Connecter dans la liste déroulante de l'espace de travail -> Connectez-vous directement: + +
+ +9. Vous pouvez spécifier un nouveau mot de passe ou utiliser celui généré par défaut. Puis cliquez sur Continuer: + +
+ +10. Dans les onglets, passez à ** votre application ** et sélectionnez ** node.js ** dans la liste déroulante. Prenez note / enregistrez le`Username`, `Host`, `Password`Comme vous en aurez besoin en flux plus tard. + +
+ +11. Retour à la toile fluide, glisser et déposer les nœuds singlestore. Cliquez sur ** Créer un nouveau ** à partir de la liste déroulante des informations d'identification: + +
+ +11. Mettez le nom d'utilisateur et le mot de passe d'en haut: + +
+ +13. Spécifiez ensuite le nom de l'hôte et de la base de données: + +
+ +13. Vous pouvez maintenant démarrer des données avec Singlestore: + +
+ +
+ +14. Revenez vers le portail Singlestore et dans votre base de données, vous pourrez voir toutes les données qui ont été renversées: + +
diff --git a/fr/integrations/langchain/vector-stores/supabase.md b/fr/integrations/langchain/vector-stores/supabase.md new file mode 100644 index 00000000..cbf3a184 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/supabase.md @@ -0,0 +1,164 @@ +# Supabase + +## Condition préalable + +1. Enregistrer un compte pour[Supabase](https://supabase.com/) +2. Cliquez sur ** Nouveau projet ** + +
+ +3. Entrée champs requis + +| Nom du champ | Description | +| ------------------------- | ------------------------------------------------- | +| ** Nom ** | Nom du projet à créer. (par exemple Flowise) | +| ** Base de données ** ** Mot de passe ** | Mot de passe à votre base de données Postgres | + +
+ +4. Cliquez sur ** Créer un nouveau projet ** et attendez que le projet termine la configuration +5. Cliquez sur ** Éditeur SQL ** + +
+ +6. Cliquez sur ** Nouvelle requête ** + +
+ +7. Copiez et collez la requête SQL ci-dessous et exécutez-la par`Ctrl + Enter`ou cliquez sur ** Exécuter **. Prenez note du nom de la table et du nom de la fonction. + +* ** Nom de la table **:`documents` +* ** Nom de la requête **:`match_documents` + +```plsql +-- Enable the pgvector extension to work with embedding vectors +create extension vector; + +-- Create a table to store your documents +create table documents ( + id bigserial primary key, + content text, -- corresponds to Document.pageContent + metadata jsonb, -- corresponds to Document.metadata + embedding vector(1536) -- 1536 works for OpenAI embeddings, change if needed +); + +-- Create a function to search for documents +create function match_documents ( + query_embedding vector(1536), + match_count int DEFAULT null, + filter jsonb DEFAULT '{}' +) returns table ( + id bigint, + content text, + metadata jsonb, + similarity float +) +language plpgsql +as $ +#variable_conflict use_column +begin + return query + select + id, + content, + metadata, + 1 - (documents.embedding <=> query_embedding) as similarity + from documents + where metadata @> filter + order by documents.embedding <=> query_embedding + limit match_count; +end; +$; + +``` + +Si certains cas, vous pouvez utiliser[Record Manager](../record-managers.md)pour garder une trace des upserts et éviter les duplications. 
Étant donné que Record Manager génère un UUID aléatoire pour chaque intégration, vous devrez modifier l'entité de la colonne d'ID en texte: + +```sql +-- Enable the pgvector extension to work with embedding vectors +create extension vector; + +-- Create a table to store your documents +create table documents ( + id text primary key, -- CHANGE TO TEXT + content text, + metadata jsonb, + embedding vector(1536) +); + +-- Create a function to search for documents +create function match_documents ( + query_embedding vector(1536), + match_count int DEFAULT null, + filter jsonb DEFAULT '{}' +) returns table ( + id text, -- CHANGE TO TEXT + content text, + metadata jsonb, + similarity float +) +language plpgsql +as $ +#variable_conflict use_column +begin + return query + select + id, + content, + metadata, + 1 - (documents.embedding <=> query_embedding) as similarity + from documents + where metadata @> filter + order by documents.embedding <=> query_embedding + limit match_count; +end; +$; + +``` + +
+ +## Installation + +1. Cliquez sur ** Paramètres du projet ** + +
+ +2. Obtenez votre ** URL du projet et la clé API ** + +
+ +3. Copiez et collez chaque détail (clé _API, URL, nom de table, nom de requête _) dans ** Supabase ** Node + +
+ +4. ** Document ** peut être connecté à n'importe quel nœud sous[**Document Loader**](../document-loaders/)catégorie +5. ** Embeddings ** peut être connecté à n'importe quel nœud sous[**Embeddings** ](../embeddings/)catégorie + +## Filtration + +Disons que vous avez différents documents renversés, chacun spécifié avec une valeur unique sous la clé de métadonnées`{source}` + +
+ +Vous pouvez utiliser le filtrage des métadonnées pour interroger les métadonnées spécifiques: + +** ui ** + +
+ +** API ** + +```json +"overrideConfig": { + "supabaseMetadataFilter": { + "source": "henry" + } +} +``` + +## Ressources + +* [LangChain JS Supabase](https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase) +* [Supabase Blog Post](https://supabase.com/blog/openai-embeddings-postgres-vector) +* [Metadata Filtering](https://js.langchain.com/docs/integrations/vectorstores/supabase#metadata-filtering) diff --git a/fr/integrations/langchain/vector-stores/upstash-vector.md b/fr/integrations/langchain/vector-stores/upstash-vector.md new file mode 100644 index 00000000..514040d1 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/upstash-vector.md @@ -0,0 +1,40 @@ +# Aubier + +## Préquis + +1. Inscrivez-vous ou connectez-vous à[Upstash Console](https://console.upstash.com) +2. Accédez à la page vectorielle et cliquez sur ** Créer un index ** +
+3. Effectuez les configurations nécessaires et créez l'index. + + 1. ** Nom de l'index **, nom de l'index à créer. (par exemple "Flowise Upstash-Demo") + 2. ** Dimensions **, taille des vecteurs à insérer dans l'index. (par exemple 1536) + 3. ** Modèle d'intégration **, le modèle à utiliser dans[Upstash Embeddings](https://upstash.com/docs/vector/features/embeddingmodels). C'est facultatif. Si vous l'activez, vous n'avez pas besoin de fournir un modèle d'intégration. + +
+ +## Installation + +1. Obtenez vos informations d'identification d'index + +
+ +1. Créez de nouveaux informations d'identification Upstash Vector et remplissez + 1. URL de repos vectoriel upstash de Upstash_Vector_Rest_url sur la console + 2. Jeton de repos vectoriel upstash de Upstash_Vector_Rest_Token sur la console + +
+ +1. Ajouter un nouveau nœud Vector ** Vector ** à toile + +
+ +1. Ajouter des nœuds supplémentaires à la toile et démarrer le processus ussert + - ** Document ** peut être connecté à n'importe quel nœud sous[**Document Loader**](../document-loaders/)catégorie + - ** Embeddings ** peut être connecté à n'importe quel nœud sous[**Embeddings** ](../embeddings/)catégorie + +
+ +1. Vérifier[Upstash dashboard](https://console.upstash.com)Pour voir si les données ont été mises à jour avec succès: + +
diff --git a/fr/integrations/langchain/vector-stores/vectara.md b/fr/integrations/langchain/vector-stores/vectara.md new file mode 100644 index 00000000..be583610 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/vectara.md @@ -0,0 +1,55 @@ +# Vectara + +## Tutoriel QuickStart + +{% embed url = "https://www.youtube.com/watch?v=rbqpvfcd5xy"%} + +## Condition préalable + +1. Enregistrer un compte pour[Vectara](https://vectara.com/integrations/flowise) +2. Cliquez sur ** Créer un corpus ** + +
+ +Nommez le corpus à créer et cliquez sur ** Créer un corpus ** puis attendez que le corpus termine la configuration. + +## Installation + +1. Cliquez sur l'onglet ** "Contrôle d'accès" ** dans la vue du corpus + +
+ +2. Cliquez sur le bouton ** "Créer la touche API" **, choisissez un nom pour la touche API et choisissez l'option ** QuerryService & indexService ** + +
+ +3. Cliquez sur ** Créer ** pour créer la touche API +4. Obtenez votre ID ** Corpus, votre clé API et votre ID client ** en cliquant sur la flèche vers le bas sous "Copie" pour votre nouvelle clé API: + +
+ +5. Retour à Flowise Canvas et créez votre ChatFlow. Cliquez sur ** Créer un nouveau ** à partir de la liste déroulante des informations d'identification ANE Entrez vos informations d'identification Vectara. + +
+ +6. Apprécier! + +## Paramètres de requête vectara + +Pour un contrôle plus fin sur les paramètres de requête Vectara, cliquez sur "** Paramètres supplémentaires **", puis vous pouvez mettre à jour les paramètres suivants à partir de leur défaut: + +* Filtre de métadonnées: Vectara prend en charge le filtrage des méta-données. Pour utiliser[filtering](https://docs.vectara.com/docs/common-use-cases/filtering-by-metadata/filter-overview), assurez-vous que les champs de métadonnées que vous souhaitez filtrer sont définis dans votre corpus vectara. +* "Sentiments avant" et "phrases après": Celles-ci contrôlent combien de phrases avant / après le texte de correspondance sont renvoyées en tant que résultats du moteur de récupération de Vectara +* Lambda: définit le comportement de[hybrid search](https://docs.vectara.com/docs/learn/hybrid-search)à Vectara +* Top-K: Combien de résultats retourner de Vectara pour la requête +* MMR-K: nombre de résultats à utiliser pour[MMR](https://docs.vectara.com/docs/api-reference/search-apis/reranking#maximal-marginal-relevance-mmr-reranker)(Relvance marginale maximale) + +
+ +## Ressources + +* [LangChain JS Vectara Blog Post](https://blog.langchain.dev/langchain-vectara-better-together/) +* [5 Reasons to Use Vectara's Langchain Integration Blog Post](https://vectara.com/5-reasons-to-use-vectaras-langchain-integration/) +* [Max Marginal Relevance in Vectara](https://vectara.com/blog/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/) +* [Vectara Boomerang embedding model Blog Post](https://vectara.com/introducing-boomerang-vectaras-new-and-improved-retrieval-model/) +* [Detecting Hallucination with Vectara's HHEM](https://vectara.com/blog/cut-the-bull-detecting-hallucinations-in-large-language-models/) diff --git a/fr/integrations/langchain/vector-stores/weaviate.md b/fr/integrations/langchain/vector-stores/weaviate.md new file mode 100644 index 00000000..da683d65 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/weaviate.md @@ -0,0 +1,42 @@ +--- +description: >- + Upsert embedded data and perform similarity or mmr search using Weaviate, a + scalable open-source vector database. +--- + +# Tisser + +

Nœud Weaviate

+ +## Filtration + +Terminer les supports suivants[syntax](https://weaviate.io/developers/weaviate/search/filters)En ce qui concerne le filtrage: + +** ui ** + +
+ +** API ** + +```json +"overrideConfig": { + "weaviateFilter": { + "where": { + "operator": "Equal", + "path": [ + "test" + ], + "valueText": "key" + } + } +} +``` + +## Ressources + +* [LangchainJS Weaviate](https://js.langchain.com/v0.1/docs/integrations/vectorstores/weaviate/#usage-query-documents) +* [Weaviate Filtering](https://weaviate.io/developers/weaviate/search/filters) + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/zep-collection-cloud.md b/fr/integrations/langchain/vector-stores/zep-collection-cloud.md new file mode 100644 index 00000000..cf76ef4d --- /dev/null +++ b/fr/integrations/langchain/vector-stores/zep-collection-cloud.md @@ -0,0 +1,13 @@ +--- +description: >- + Upsert embedded data and perform similarity or mmr search upon query using + Zep, a fast and scalable building block for LLM apps. +--- + +# Collection Zep - Cloud + +

Zep Collection - Cloud Node + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/langchain/vector-stores/zep-collection-open-source.md b/fr/integrations/langchain/vector-stores/zep-collection-open-source.md new file mode 100644 index 00000000..c48cd551 --- /dev/null +++ b/fr/integrations/langchain/vector-stores/zep-collection-open-source.md @@ -0,0 +1,13 @@ +--- +description: >- + Upsert embedded data and perform similarity or mmr search upon query using + Zep, a fast and scalable building block for LLM apps. +--- + +# Collection ZEP - open source + +

Nœud Collection Zep - Open Source

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/litellm/README.md b/fr/integrations/litellm/README.md new file mode 100644 index 00000000..740b64fa --- /dev/null +++ b/fr/integrations/litellm/README.md @@ -0,0 +1,97 @@ +--- +description: LangChain Record Manager Nodes +--- + +# Gestionnaires de dossiers + +*** + +Les gestionnaires d'enregistrement gardent une trace de vos documents indexés, empêchant les intégres du vecteur dupliqués dans[Vector Store](vector-stores/). + +Lorsque des morceaux de document augmentent, chaque morceau sera haché en utilisant[SHA-1](https://github.com/emn178/js-sha1)algorithme. Ces hachages seront stockés dans Record Manager. S'il y a un hachage existant, le processus d'incorporation et de mise en service sera ignoré. + +Dans certains cas, vous voudrez peut-être supprimer les documents existants dérivés des mêmes sources que les nouveaux documents indexés. Pour cela, il existe 3 modes de nettoyage pour le gestionnaire de disques: + +{% Tabs%} +{% tab title = "incrémentiel"%} +Lorsque vous augmentez plusieurs documents et que vous souhaitez empêcher la suppression des documents existants qui ne font pas partie du processus de mise en service actuel, utilisez le mode de nettoyage ** ** **. + +1. Ayons un gestionnaire de disques avec`Incremental`Nettoyage et`source`En tant que clé sourceid + +
src="../../.gitbook/assets/image (5) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png" alt="" width="410">
+ +2. Et avoir les 2 documents suivants: + +| Texte | Métadonnées | +| ---- | ---------------- | +| Cat |`{source:"cat"}` | +| Chien |`{source:"dog"}` | + +
+ +
+ +3. Après un upsert, nous verrons 2 documents qui sont mis en place: + +
+ +* Le document original ** Cat ** est supprimé +* Un nouveau document avec ** Cats ** est ajouté +* ** Dog ** Le document est laissé intact +* Les incorporations vectorielles restantes dans le magasin vectoriel sont ** chats ** et ** chien ** + +
+{% endtab%} + +{% tab title = "full"%} +Lorsque vous augmentez plusieurs documents, ** le mode de nettoyage complet ** supprimera automatiquement tous les intérêts vectoriels qui ne font pas partie du processus de mise en service actuel. + +1. Ayons un gestionnaire de disques avec`Full`Nettoyage. Nous n'avons pas besoin d'avoir une clé sourceid pour le mode de nettoyage complet. + +
src="../../.gitbook/assets/image (17) (1) (1) (1) (2).png" alt="" width="407">
+ +2. Et avoir les 2 documents suivants: + +| Texte | Métadonnées | +| ---- | ---------------- | +| Cat |`{source:"cat"}` | +| Chien |`{source:"dog"}` | + +
+ +
+ +3. Après un upsert, nous verrons 2 documents qui sont mis en place: + +
+ +* Le document original ** Cat ** est supprimé +* Un nouveau document avec ** Cats ** est ajouté +* ** Dog ** Le document est supprimé +* Les incorporations vectorielles restantes dans le magasin vectoriel sont juste ** Cats ** + +
- + Agent that uses Anthropic Function Calling to pick the tools and args to call + using LlamaIndex. +--- + +# Agent d'outils anthropiques + +
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/agents/openai-tool-agent.md b/fr/integrations/llamaindex/agents/openai-tool-agent.md new file mode 100644 index 00000000..fdb1b301 --- /dev/null +++ b/fr/integrations/llamaindex/agents/openai-tool-agent.md @@ -0,0 +1,13 @@ +--- +description: >- + Agent that uses OpenAI Function Calling to pick the tools and args to call + using LlamaIndex. +--- + +# Agent d'outils Openai + +

Nœud OpenAI Tool Agent

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/README.md b/fr/integrations/llamaindex/chat-models/README.md new file mode 100644 index 00000000..58709961 --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/README.md @@ -0,0 +1,19 @@ +--- +description: LlamaIndex Chat Model Nodes +--- + +# Modèles de chat + +*** + +Les modèles de chat prennent une liste de messages en entrée et renvoient un message généré par le modèle en tant que sortie. Ces modèles tels que ** GPT-3.5-turbo ** ou ** GPT4 ** sont puissants et moins chers que ses modèles de complétion prédécesseur tels que ** text-davincii-003 **. + +### Nœuds de modèle de chat: + +* [AzureChatOpenAI](azurechatopenai.md) +* [ChatAnthropic](chatanthropic.md) +* [ChatMistral](chatmistral.md) +* [ChatOllama](chatollama.md) +* [ChatOpenAI](chatopenai.md) +* [ChatTogetherAI](chattogetherai.md) +* [ChatGroq](chatgroq.md) diff --git a/fr/integrations/llamaindex/chat-models/azurechatopenai.md b/fr/integrations/llamaindex/chat-models/azurechatopenai.md new file mode 100644 index 00000000..cb45d933 --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/azurechatopenai.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around Azure OpenAI Chat LLM specific for LlamaIndex. +--- + +# Azurechatopenai + +

Nœud AzureChatOpenAI

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/chatanthropic.md b/fr/integrations/llamaindex/chat-models/chatanthropic.md new file mode 100644 index 00000000..77858f7d --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/chatanthropic.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around ChatAnthropic LLM specific for LlamaIndex. +--- + +# Chatanthropic + + + +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/chatgroq.md b/fr/integrations/llamaindex/chat-models/chatgroq.md new file mode 100644 index 00000000..3ebf58fa --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/chatgroq.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around Groq LLM specific for LlamaIndex. +--- + +# Chatroq + +

Nœud ChatGroq

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/chatmistral.md b/fr/integrations/llamaindex/chat-models/chatmistral.md new file mode 100644 index 00000000..b218754d --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/chatmistral.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around ChatMistral LLM specific for LlamaIndex. +--- + +# Chat + +

Nœud ChatMistral

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/chatollama.md b/fr/integrations/llamaindex/chat-models/chatollama.md new file mode 100644 index 00000000..5a78b10e --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/chatollama.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around ChatOllama LLM specific for LlamaIndex. +--- + +# Chatollame + +

Nœud ChatOllama

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/chatopenai.md b/fr/integrations/llamaindex/chat-models/chatopenai.md new file mode 100644 index 00000000..4bb1bc59 --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/chatopenai.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around OpenAI Chat LLM specific for LlamaIndex. +--- + +# Chatopenai + +

Nœud ChatOpenAI

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/chat-models/chattogetherai.md b/fr/integrations/llamaindex/chat-models/chattogetherai.md new file mode 100644 index 00000000..80759a00 --- /dev/null +++ b/fr/integrations/llamaindex/chat-models/chattogetherai.md @@ -0,0 +1,11 @@ +--- +description: Wrapper around ChatTogetherAI LLM specific for LlamaIndex. +--- + +# Chattogetherai + +

Nœud ChatTogetherAI

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/embeddings/README.md b/fr/integrations/llamaindex/embeddings/README.md new file mode 100644 index 00000000..746e9712 --- /dev/null +++ b/fr/integrations/llamaindex/embeddings/README.md @@ -0,0 +1,25 @@ +--- +description: LlamaIndex Embeddings Nodes +--- + +# Incorporer + +*** + +Une intégration est un vecteur (liste) de numéros de points flottants. La distance entre deux vecteurs mesure leur relation. De petites distances suggèrent une forte parenté et de grandes distances suggèrent une faible parenté. + +Les intégres peuvent être utilisés pour créer une représentation numérique des données textuelles. Cette représentation numérique est utile car elle peut être utilisée pour trouver des documents similaires. + +Ils sont couramment utilisés pour: + +* Recherche (où les résultats sont classés par pertinence pour une chaîne de requête) +* Clustering (où les chaînes de texte sont regroupées par similitude) +* Recommandations (où les éléments avec des chaînes de texte connexes sont recommandés) +* Détection d'anomalies (où les valeurs aberrantes avec peu de parenté sont identifiées) +* Mesure de la diversité (où les distributions de similitude sont analysées) +* Classification (où les chaînes de texte sont classées par leur étiquette la plus similaire) + +### Nœuds d'intégration: + +* [Azure OpenAI Embeddings](azure-openai-embeddings.md) +* [OpenAI Embedding](openai-embedding.md) diff --git a/fr/integrations/llamaindex/embeddings/azure-openai-embeddings.md b/fr/integrations/llamaindex/embeddings/azure-openai-embeddings.md new file mode 100644 index 00000000..dfc25a83 --- /dev/null +++ b/fr/integrations/llamaindex/embeddings/azure-openai-embeddings.md @@ -0,0 +1,11 @@ +--- +description: Azure 
OpenAI API embeddings specific for LlamaIndex. +--- + +# Azure Openai Embeddings + +
+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/embeddings/openai-embedding.md b/fr/integrations/llamaindex/embeddings/openai-embedding.md new file mode 100644 index 00000000..4e0c2349 --- /dev/null +++ b/fr/integrations/llamaindex/embeddings/openai-embedding.md @@ -0,0 +1,11 @@ +--- +description: OpenAI Embedding specific for LlamaIndex. +--- + +# Openai Incorpore + +

Nœud OpenAI Embedding

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/llamaindex/engine/README.md b/fr/integrations/llamaindex/engine/README.md new file mode 100644 index 00000000..3eec074f --- /dev/null +++ b/fr/integrations/llamaindex/engine/README.md @@ -0,0 +1,16 @@ +--- +description: LlamaIndex Engine Nodes +--- + +# Moteur + +*** + +Dans Llamaindex, un nœud moteur fait référence à deux composants clés qui gèrent le traitement de l'information et l'interaction utilisateur. + +### Nœuds de moteur: + +* [Query Engine](query-engine.md) +* [Simple Chat Engine](simple-chat-engine.md) +* [Context Chat Engine](context-chat-engine.md) +* [Sub-Question Query Engine](sub-question-query-engine.md) diff --git a/fr/integrations/llamaindex/engine/context-chat-engine.md b/fr/integrations/llamaindex/engine/context-chat-engine.md new file mode 100644 index 00000000..14039b27 --- /dev/null +++ b/fr/integrations/llamaindex/engine/context-chat-engine.md @@ -0,0 +1,24 @@ +# Moteur de chat contextuel + +Un moteur de chat sert de pipeline de bout en bout pour avoir une conversation humaine avec vos données, permettant plusieurs échanges plutôt qu'une seule interaction de questions et réponses. + +
+ +## Entrées + +* Modèle de chat +* Retriever du magasin vectoriel +* [Memory](../../langchain/memory/) + +## Paramètres + +| Nom | Description | +| ----------------------- | ------------------------------------------------------------------- | +| Retour des documents source | Pour retourner les citations / sources qui ont été utilisées pour construire la réponse | +| Message système | Une instruction pour LLM sur la façon de répondre à la requête | + +## Sorties + +| Nom | Description | +| ----------------- | ----------------------------- | +| ContextChatengine | Node final pour retourner la réponse | diff --git a/fr/integrations/llamaindex/engine/query-engine.md b/fr/integrations/llamaindex/engine/query-engine.md new file mode 100644 index 00000000..b673c8c5 --- /dev/null +++ b/fr/integrations/llamaindex/engine/query-engine.md @@ -0,0 +1,22 @@ +# Moteur de requête + +Un moteur de requête sert de pipeline de bout en bout permet aux utilisateurs de poser des questions sur leurs données. Il reçoit une requête en langue naturelle et fournit une réponse, accompagnée d'informations de contexte pertinentes récupérées et transmises au LLM (modèle de grande langue). + +
+ +## Entrées + +* Retriever du magasin vectoriel +* [Response Synthesizer](../response-synthesizer/) + +## Paramètres + +| Nom | Description | +| ----------------------- | ------------------------------------------------------------------- | +| Retour des documents source | Pour retourner les citations / sources qui ont été utilisées pour construire la réponse | + +## Sorties + +| Nom | Description | +| ----------- | ----------------------------- | +| QueryEngine | Node final pour retourner la réponse | diff --git a/fr/integrations/llamaindex/engine/simple-chat-engine.md b/fr/integrations/llamaindex/engine/simple-chat-engine.md new file mode 100644 index 00000000..83b61ab8 --- /dev/null +++ b/fr/integrations/llamaindex/engine/simple-chat-engine.md @@ -0,0 +1,22 @@ +# Moteur de chat simple + +Un moteur de chat simple fonctionne comme un pipeline complet pour s'engager dans un dialogue entre l'IA et l'utilisateur, sans récupération de contexte. Cependant, il est équipé de[Memory](../../langchain/memory/), permettant de se souvenir des conversations. + +
+ +## Entrées + +* Modèle de chat +* [Memory](../../langchain/memory/) + +## Paramètres + +| Nom | Description | +| -------------- | --------------------------------------------- | +| Message système | Une instruction pour LLM sur la façon de répondre à la requête | + +## Sorties + +| Nom | Description | +| ---------------- | ----------------------------- | +| Simplechatégine | Node final pour retourner la réponse | diff --git a/fr/integrations/llamaindex/engine/sub-question-query-engine.md b/fr/integrations/llamaindex/engine/sub-question-query-engine.md new file mode 100644 index 00000000..a9473290 --- /dev/null +++ b/fr/integrations/llamaindex/engine/sub-question-query-engine.md @@ -0,0 +1,24 @@ +# Moteur de requête de sous-question + +Un moteur de requête conçu pour résoudre le problème de la réponse à une requête complexe à l'aide de plusieurs sources de données. Il décompose d'abord la requête complexe en sous-questions pour chaque source de données pertinente, puis recueille toutes les repres intermédiaires et synthétise une réponse finale. + +
+ +## Entrées + +* Outils de moteur de requête +* Modèle de chat +* Incorporer +* [Response Synthesizer](../response-synthesizer/) + +## Paramètres + +| Nom | Description | +| ----------------------- | ------------------------------------------------------------------- | +| Retour des documents source | Pour retourner les citations / sources qui ont été utilisées pour construire la réponse | + +## Sorties + +| Nom | Description | +| ---------------------- | ----------------------------- | +| SUBESTIONQUESQUEURYEngine | Node final pour retourner la réponse | diff --git a/fr/integrations/llamaindex/response-synthesizer/README.md b/fr/integrations/llamaindex/response-synthesizer/README.md new file mode 100644 index 00000000..6ff86632 --- /dev/null +++ b/fr/integrations/llamaindex/response-synthesizer/README.md @@ -0,0 +1,16 @@ +--- +description: LlamaIndex Response Synthesizer Nodes +--- + +# Synthétiseur de réponse + +*** + +Les nœuds de synthétiseur de réponse sont responsables de l'envoi de la requête, des nœuds et des modèles invite au LLM pour générer une réponse. Il existe 4 modes pour générer une réponse: + +### Nœuds de synthétiseur: + +* [Refine](refine.md) +* [Compact and Refine](compact-and-refine.md) +* [Simple Response Builder](simple-response-builder.md) +* [Tree Summarize](tree-summarize.md) diff --git a/fr/integrations/llamaindex/response-synthesizer/compact-and-refine.md b/fr/integrations/llamaindex/response-synthesizer/compact-and-refine.md new file mode 100644 index 00000000..63e0b2a2 --- /dev/null +++ b/fr/integrations/llamaindex/response-synthesizer/compact-and-refine.md @@ -0,0 +1,36 @@ +# Compact et affiner + +Il s'agit de la valeur par défaut lorsqu'aucun synthétiseur de réponse n'est défini en explile. + +Compacte l'invite lors de chaque appel LLM en remplissant autant de morceaux de texte qui peuvent s'adapter à la taille invite maximale. 
S'il y a trop de morceaux pour faire des trucs dans une invite, "Créez et affinez" une réponse en passant par plusieurs invites compactes. + +** Pros **: le même que[Refine](refine.md), Bon pour les réponses plus détaillées, et devrait entraîner des appels LLM moins + +** CONS **: En raison des multiples appels LLM, peut être coûteux + +
+ +** Affiner l'invite ** + +```markup +The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer: +``` + +** Texte du texte QA Invite ** + +``` +Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer: +``` diff --git a/fr/integrations/llamaindex/response-synthesizer/refine.md b/fr/integrations/llamaindex/response-synthesizer/refine.md new file mode 100644 index 00000000..990c2aa9 --- /dev/null +++ b/fr/integrations/llamaindex/response-synthesizer/refine.md @@ -0,0 +1,34 @@ +# Affiner + +Créez et affinez une réponse en parcourant séquentiellement chaque morceau de texte récupéré. + +** Pros **: Bon pour les réponses plus détaillées + +** CONS **: appel LLM séparé par nœud (peut être cher) + +
+ +** Affiner l'invite ** + +```markup +The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer: +``` + +** Texte du texte QA Invite ** + +``` +Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer: +``` diff --git a/fr/integrations/llamaindex/response-synthesizer/simple-response-builder.md b/fr/integrations/llamaindex/response-synthesizer/simple-response-builder.md new file mode 100644 index 00000000..f5578da5 --- /dev/null +++ b/fr/integrations/llamaindex/response-synthesizer/simple-response-builder.md @@ -0,0 +1,9 @@ +# Builder de réponse simple + +À l'aide d'une collection de segments de texte et d'une requête, exécutez la requête sur chaque segment, en rassemblant les réponses dans un tableau. Renvoie une chaîne combinée contenant toutes les réponses. + +** PROS **: utile pour interroger individuellement chaque segment de texte avec la même requête + +** CONS **: Pas adapté à une réponse complexe et détaillée + +
diff --git a/fr/integrations/llamaindex/response-synthesizer/tree-summarize.md b/fr/integrations/llamaindex/response-synthesizer/tree-summarize.md new file mode 100644 index 00000000..aaef2c06 --- /dev/null +++ b/fr/integrations/llamaindex/response-synthesizer/tree-summarize.md @@ -0,0 +1,21 @@ +# Arbre résumer + +Lorsqu'il est fourni avec des morceaux de texte et une requête, construisez récursivement une structure d'arbre et renvoyez le nœud racine en conséquence. + +** PROS **: bénéfique pour les tâches de résumé + +** CONS **: La précision de la réponse peut être perdue lors de la traversée de la structure des arbres + +
+ +**Rapide** + +``` +Context information from multiple sources is below. +--------------------- +{context} +--------------------- +Given the information from multiple sources and not prior knowledge, answer the query. +Query: {query} +Answer: +``` diff --git a/fr/integrations/llamaindex/tools/README.md b/fr/integrations/llamaindex/tools/README.md new file mode 100644 index 00000000..923f1a96 --- /dev/null +++ b/fr/integrations/llamaindex/tools/README.md @@ -0,0 +1,13 @@ +--- +description: LlamaIndex Agent Nodes +--- + +# Outils + +*** + +Les outils sont des fonctions que les agents peuvent utiliser pour interagir avec le monde. Ces outils peuvent être des services publics génériques (par exemple, recherche), d'autres chaînes ou même d'autres agents. + +### Nœuds d'outil: + +* [Query Engine Tool](query-engine-tool.md) diff --git a/fr/integrations/llamaindex/tools/query-engine-tool.md b/fr/integrations/llamaindex/tools/query-engine-tool.md new file mode 100644 index 00000000..c009bdaf --- /dev/null +++ b/fr/integrations/llamaindex/tools/query-engine-tool.md @@ -0,0 +1,22 @@ +# Outil de moteur de requête + +Transforme le moteur de requête en un outil qui peut ensuite être utilisé par[Sub-Question Query Engine](../engine/sub-question-query-engine.md)ou agent. + +
+ +## Entrées + +* Index du magasin vectoriel + +## Paramètres + +| Nom | Description | +| ---------------- | --------------------------------------------------- | +| Nom de l'outil | Nom de l'outil | +| Description de l'outil | Une description pour dire quand LLM doit utiliser cet outil | + +## Sorties + +| Nom | Description | +| --------------- | ------------------------------------------------------------------------------------------------ | +| QueryEnginetool | Point de connexion à l'agent ou[Sub-Question Query Engine](../engine/sub-question-query-engine.md) | diff --git a/fr/integrations/llamaindex/vector-stores/README.md b/fr/integrations/llamaindex/vector-stores/README.md new file mode 100644 index 00000000..aa79e6e3 --- /dev/null +++ b/fr/integrations/llamaindex/vector-stores/README.md @@ -0,0 +1,18 @@ +--- +description: LlamaIndex Vector Store Nodes +--- + +# Magasins vectoriels + +*** + +Un magasin vectoriel ou une base de données vectorielle fait référence à un type de système de base de données spécialisée dans le stockage et la récupération des vecteurs numériques de haute dimension. Les magasins vectoriels sont conçus pour gérer et indexer efficacement ces vecteurs, permettant des recherches de similitude rapides. + +### Regardez une introduction sur les magasins vectoriels et comment vous pouvez l'utiliser sur Flowise + +{% embed url = "https://youtu.be/m0nr1_pnaxc"%} + +### Nœuds de magasin vectoriel: + +* [Pinecone](pinecone.md) +* [SimpleStore](queryengine-tool.md) diff --git a/fr/integrations/llamaindex/vector-stores/pinecone.md b/fr/integrations/llamaindex/vector-stores/pinecone.md new file mode 100644 index 00000000..f7017811 --- /dev/null +++ b/fr/integrations/llamaindex/vector-stores/pinecone.md @@ -0,0 +1,55 @@ +--- +description: >- + Upsert embedded data and perform similarity search upon query using Pinecone, + a leading fully managed hosted vector database. +--- + +# Pignon + +## Condition préalable + +1. 
Enregistrer un compte pour[Pinecone](https://app.pinecone.io/) +2. Cliquez sur ** Créer un index ** + +
+ +3. Remplissez les champs requis: + * ** Nom de l'index **, nom de l'index à créer. (par exemple, "test fluide") + * ** Dimensions **, taille des vecteurs à insérer dans l'index. (par exemple 1536) + +
+ +4. Cliquez sur ** Créer un index ** + +## Installation + +1. Obtenez / créez votre ** clé API ** + +
+ +2. Ajoutez un nouveau nœud ** Pinecone ** à Canvas et remplissez les paramètres: + * Index de pince + * Espace de noms PineCone (facultatif) + +

Nœud Pinecone

+ +3. Créer un nouvel identifiant PineCone -> Remplissez ** Clé API ** + +
+ +4. Ajouter des nœuds supplémentaires à la toile et démarrer le processus ussert + * ** Document ** peut être connecté à n'importe quel nœud sous[**Document Loader**](../../langchain/document-loaders/)catégorie + +{% hint style = "info"%} +Les chargeurs de documents et les séparateurs de texte pour Llamaindex ne sont pas encore disponibles, mais l'utilisation de l'une de celles disponibles sous Langchain permettra toujours la question avec Llamaindex comme d'habitude. +{% EndHint%} + +\ - \ * \ * Embeddings \ * \ * peut être connecté à n'importe quel nœud sous \ [\ * \ * Embeddings \ * \ *] \ (../ Embeddings /) + +
+ +
+ +5. Vérifier sur[Pinecone dashboard](https://app.pinecone.io)Ces données ont été renversées avec succès: + +
diff --git a/fr/integrations/llamaindex/vector-stores/queryengine-tool.md b/fr/integrations/llamaindex/vector-stores/queryengine-tool.md new file mode 100644 index 00000000..171ec197 --- /dev/null +++ b/fr/integrations/llamaindex/vector-stores/queryengine-tool.md @@ -0,0 +1,11 @@ +--- +description: Upsert embedded data to local path and perform similarity search. +--- + +# Plus simple + +

nœud simplesore

+ +{% hint style = "info"%} +Cette section est un travail en cours. Nous apprécions toute aide que vous pouvez fournir pour terminer cette section. Veuillez vérifier notre[Contribution Guide](broken-reference)Pour commencer. +{% EndHint%} diff --git a/fr/integrations/utilities/README.md b/fr/integrations/utilities/README.md new file mode 100644 index 00000000..50617d6b --- /dev/null +++ b/fr/integrations/utilities/README.md @@ -0,0 +1,16 @@ +--- +description: Learn how to use Flowise utility nodes +--- + +# Utilities + +*** + +Utility nodes are development tools that help you implement custom JS, logic and notes in your flows. + +### Available Utilities: + +* [Custom JS Function](custom-js-function.md) +* [Set/Get Variable](set-get-variable.md) +* [If Else](if-else.md) +* [Sticky Note](sticky-note.md) diff --git a/fr/integrations/utilities/custom-js-function.md b/fr/integrations/utilities/custom-js-function.md new file mode 100644 index 00000000..2583bae1 --- /dev/null +++ b/fr/integrations/utilities/custom-js-function.md @@ -0,0 +1,11 @@ +--- +description: Execute custom javascript function. +--- + +# Custom JS Function + +

Custom JS Function Node

+ +{% hint style="info" %} +This section is a work in progress. We appreciate any help you can provide in completing this section. Please check our [Contribution Guide](broken-reference) to get started. +{% endhint %} diff --git a/fr/integrations/utilities/if-else.md b/fr/integrations/utilities/if-else.md new file mode 100644 index 00000000..e0908b7c --- /dev/null +++ b/fr/integrations/utilities/if-else.md @@ -0,0 +1,56 @@ +# If Else + +Flowise allows you to split your chatflow into different branches depending on If/Else condition. + +
+ +### Input Variables + +
+ +As noticed from the image above, it takes in any nodes that has `json` output. Some examples are: Custom Function, LLM Chain Output Prediction, Get/Set Variables. + +
+ +You can then give a variable name: + +
+ +This variable can then be used in the [If Function](if-else.md#if-function) and [Else Function](if-else.md#else-function) with the prefix `$`. For example: + +``` +$output +``` + +### If Else Name + +You can name the node for easier visualization of what it does. + +### If Function + +This is a piece of JS code that is ran on Node sandbox. It must: + +* Contains the `if` statement +* Returns a value within `if` statement + +
+ +This gives much more flexibility for users to do complex comparison like regex, date comparsion and many more. + +### Else Function + +Similar to If Function, it must returns a value. This function will only be ran if the [If Function](if-else.md#if-function) does not return a value. + +
+ +### Output + +
+ +When the [If Function](if-else.md#if-function) successfully returns a value, it will be passed to the **True** output dot as shown above. This allow users to pass the value to the next node. + +Otherwise, the returned value from [Else Function](if-else.md#else-function) will be passed to the **False** output dot. + +User can also take a look at the If Else template in the marketplace: + +
diff --git a/fr/integrations/utilities/set-get-variable.md b/fr/integrations/utilities/set-get-variable.md new file mode 100644 index 00000000..3c6d13eb --- /dev/null +++ b/fr/integrations/utilities/set-get-variable.md @@ -0,0 +1,17 @@ +# Set/Get Variable + +If you are running a Custom Function, or LLM Chain, you might want to reuse the result in other nodes without having to recompute/rerun the same thing again. You can save the output result as a variable, and reuse it for other nodes that is further down the flow path. + +
+ +### Set Variable + +Taking inputs from any node that outputs `string, number, boolean, json, array,` we can assign a variable name to it. + +
+ +### Get Variable + +You can get the variable value from the variable name at a later stage: + +
diff --git a/fr/integrations/utilities/sticky-note.md b/fr/integrations/utilities/sticky-note.md new file mode 100644 index 00000000..ef1b608c --- /dev/null +++ b/fr/integrations/utilities/sticky-note.md @@ -0,0 +1,11 @@ +--- +description: Add a sticky note to the flow. +--- + +# Sticky Note + +
+ +{% hint style="info" %} +This section is a work in progress. We appreciate any help you can provide in completing this section. Please check our [Contribution Guide](broken-reference) to get started. +{% endhint %} diff --git a/fr/migration-guide/README.md b/fr/migration-guide/README.md new file mode 100644 index 00000000..b9427c17 --- /dev/null +++ b/fr/migration-guide/README.md @@ -0,0 +1,18 @@ +--- +description: Learn about legacy versions of Flowise +--- + +# Guide de migration + +*** + +Flowise offre généralement une compatibilité vers l'arrière, ce qui signifie que ses mises à jour suivent un chemin de développement continu. Cependant, il peut parfois y avoir des changements de rupture. + +Cette section fournit des conseils lors de la migration vers différentes versions de rupture. + +## Versions + +* [Cloud Migration](cloud-migration.md) +* [v1.3.0 Migration Guide](v1.3.0-migration-guide.md) +* [v1.4.3 Migration Guide](v1.4.3-migration-guide.md) +* [v2.1.4 Migration Guide](v2.1.4-migration-guide.md) diff --git a/fr/migration-guide/cloud-migration.md b/fr/migration-guide/cloud-migration.md new file mode 100644 index 00000000..cae4a979 --- /dev/null +++ b/fr/migration-guide/cloud-migration.md @@ -0,0 +1,72 @@ +# Migration Cloud + +Ce guide consiste à aider les utilisateurs à migrer du cloud V1 vers V2. + +Dans Cloud V1, l'URL des applications ressemble à ** https: // \ .app.flowiseai.com ** + +Dans Cloud V2, l'URL des applications est ** https: //cloud.flowiseai.com** + +Pourquoi Cloud V2? Nous avons réécrit un nuage à partir de zéro, qui a une amélioration de la vitesse 5X, la capacité d'avoir plusieurs espaces de travail, des membres de l'organisation et, surtout, il est très évolutif avec[production-ready architecture](../configuration/running-in-production.md). + +1. Connectez-vous à Cloud V1 via[https://flowiseai.com/auth/login](https://flowiseai.com/auth/login) +2. Dans votre tableau de bord, dans le coin supérieur droit: + +
+ +3. ** Sélectionnez la version, puis mettez à jour vers la dernière version. ** + +
+ +4. Sélectionnez Exporter, sélectionnez les données que vous souhaitez exporter: + +
+ +5. Enregistrez le fichier JSON exporté. +6. Accédez à Cloud V2[https://cloud.flowiseai.com](https://cloud.flowiseai.com/) +7. Le compte Cloud V2 ne se synchronise pas avec votre compte existant dans Cloud V1, vous devrez vous inscrire à nouveau ou vous connecter avec Google / GitHub. + +
+ +8. Une fois connecté, dans le coin du tableau de bord en haut à droite, cliquez sur Importer et téléchargez le fichier JSON exporté. + +
+ +9. Le nouvel utilisateur par défaut est sur le ** plan gratuit ** avec une limitation de 2 flux et assistants (pour chacun). Si vos données exportées en ont plus que cela, l'importation du fichier JSON exporté lancera une erreur. C'est pourquoi nous donnons ** Premier mois gratuit ** sur ** Plan de démarrage ** qui a des flux et assistants illimités! + +
+ +10. Cliquez sur le bouton ** démarrage ** et ajoutez votre mode de paiement préféré: + +
+ +
+ +11. Après le mode de paiement supplémentaire, reviendrez pour couler, cliquez sur Démarrer sur le plan sélectionné et confirmez la modification: + +
+ +12. Si tout se passe bien, vous devriez être sur le plan de démarrage avec des flux et assistants illimités! Hourra: Tada: essayez à nouveau d'importer le fichier JSON s'il échouait précédemment en raison de la limitation du plan libre. + +{% Hint Style = "Success"%} +Tous les ID des données exportées restent les mêmes, vous n'avez donc pas à vous soucier de la mise à jour de l'ID pour l'API, il vous suffit de mettre à jour l'URL comme[https://cloud.flowiseai.com/api/v1/prediction/69fb1055-ghj324-ghj-0a4ytrerf](https://cloud.flowiseai.com/api/v1/prediction/69fb1055-ghj324-ghj-0a4ytrerf) +{% EndHint%} + +{% hint style = "avertissement"%} +Les informations d'identification ne sont pas exportées. Vous devrez créer de nouvelles informations d'identification et les utiliser dans les flux et les assistants. +{% EndHint%} + +13. Après avoir vérifié que tout fonctionne comme prévu, vous pouvez désormais annuler l'abonnement Cloud V1. +14. Dans le panneau du côté gauche, cliquez sur Paramètres du compte, faites défiler vers le bas et vous verrez ** Annuler l'abonnement précédent **: + +
+ +15. Entrez votre e-mail précédent qui a été utilisé pour inscrire le Cloud V1 et appuyez sur ** Envoyer des instructions **. +16. Vous recevrez ensuite un e-mail pour annuler votre abonnement précédent: + +
+ +17. Cliquez sur le bouton ** Gérer l'abonnement ** vous amènera à un portail où vous pouvez annuler l'abonnement Cloud V1. Votre application Cloud V1 sera ensuite fermée sur le prochain cycle de facturation. + +
+ +Nous nous excusons sincèrement pour tout inconvénient que nous avons causé au cours du processus de migration. Si quelque chose, nous aimerions aider, n'hésitez pas à nous joindre à support@flowiseai.com. diff --git a/fr/migration-guide/v1.3.0-migration-guide.md b/fr/migration-guide/v1.3.0-migration-guide.md new file mode 100644 index 00000000..10e727a5 --- /dev/null +++ b/fr/migration-guide/v1.3.0-migration-guide.md @@ -0,0 +1,38 @@ +--- +description: In v1.3.0, we introduced Credentials +--- + +# V1.3.0 Guide de migration + +*** + +Les informations d'identification permettent à l'utilisateur de stocker toutes les clés d'API tiers dans la base de données et peut être facilement réutilisée sur les nœuds respectifs, sans avoir à copier le collage à chaque fois. + +Les informations d'identification sont chiffrées par une clé de chiffrement créée à l'aide d'une phrase de passe. Seul l'utilisateur qui a accès à la clé peut chiffrer / décrypter les informations d'identification. De plus, les titres de compétences décryptés ne seront jamais renvoyés au client pour éviter l'usurpation du réseau. + +Voici quelques guides importants pour vous aider à migrer vers la V1.3.0: + +1. Ensemble`PASSPHRASE`Env variable. Ceci est utilisé pour générer une clé de chiffrement utilisée pour crypter / déchiffrer vos informations d'identification +2. Ensemble`SECRETKEY_PATH`Env variable. Pour persister votre clé de cryptage, spécifiez l'emplacement où la clé de chiffrement est en cours. + +Un typique`.env`Le fichier devrait ressembler à ceci: + +```sh +PORT=3000 +PASSPHRASE=MYPASSPHRASE +DATABASE_PATH=/root/.flowise +SECRETKEY_PATH=/root/.flowise +LOG_PATH=/root/.flowise/logs +``` + +3. Version de nœud. Il y aura un message d'avertissement affiché en haut à droite d'un nœud si la version est obsolète. Cela signifie qu'il y a de nouvelles modifications sur le nœud, et vous devez le supprimer et le réadapter de la liste des menues. + +
+ +C'est ça! Faites-nous savoir si vous avez rencontré des problèmes. Bonne mise à niveau! + +## Tutoriel vidéo + +Dans ce didacticiel vidéo, Leon montre comment configurer des informations d'identification sur Flowise. + +{% embed url = "https://youtu.be/32dfgpdymcs"%} diff --git a/fr/migration-guide/v1.4.3-migration-guide.md b/fr/migration-guide/v1.4.3-migration-guide.md new file mode 100644 index 00000000..d589fd16 --- /dev/null +++ b/fr/migration-guide/v1.4.3-migration-guide.md @@ -0,0 +1,42 @@ +--- +description: In v1.4.3, we introduced a unified Vector Store node +--- + +# V1.4.3 Guide de migration + +*** + +## Avant + +Auparavant, les utilisateurs devraient créer 2 flux pour effectuer un upsert et une requête: + +### Ascension + + 
+
+### Charge existant
+
+<Figure> <img src =
+ +Avec cette technique, il y a 2 inconvénients: + +* L'appel LLM supplémentaire devra être effectué pour que l'upsert se produise +* Toute légère modification provoquera à nouveau le débit + +## Après + +Maintenant, l'utilisateur peut simplement utiliser un nœud pour accomplir tout: + +
+ +
+ +Il est livré avec la nouvelle API -`/api/v1/vector/upsert`: + +
+ +À l'avenir, nous allons déployer une fonctionnalité pour interroger, supprimer les index. Il s'agit de la première étape vers une manière plus flexible d'effectuer des opérations liées aux vecteurs. Nous recommandons fortement aux utilisateurs de mettre à jour les nouveaux nœuds. diff --git a/fr/migration-guide/v2.1.4-migration-guide.md b/fr/migration-guide/v2.1.4-migration-guide.md new file mode 100644 index 00000000..af890075 --- /dev/null +++ b/fr/migration-guide/v2.1.4-migration-guide.md @@ -0,0 +1,30 @@ +# v2.1.4 Guide de migration + +OverRideConfig permet aux utilisateurs de remplacer les configurations de flux à partir de l'API ou lors de l'utilisation de l'intégration. En raison de problèmes de sécurité, il est désormais désactivé par défaut. + +Les utilisateurs doivent explicitement spécifier quelle configuration peut être remplacée à partir de l'interface utilisateur. + +1.) Aller à la configuration: + +
+ +2.) Activer la configuration de remplacement: + +
+ +3.) Allumez la bascule de la configuration qui peut être remplacée et enregistrer. + +
+ +4.) Par exemple, les utilisateurs peuvent ensuite remplacer ces variables et la configuration. Se référer à[OverrideConfig](broken-reference). + +```json +{ + "overrideConfig": { + "systemMessage": "You are helpful assistant", + "vars": { + "character": "nice" + } + } +} +``` diff --git a/fr/text-splitters/charater-text-splitter.md b/fr/text-splitters/charater-text-splitter.md new file mode 100644 index 00000000..4c2dc4ad --- /dev/null +++ b/fr/text-splitters/charater-text-splitter.md @@ -0,0 +1,8 @@ +# Decoupade de texte de caractère +Il s'agit de la méthode la plus simple de division de texte. Cela se divise en fonction des caractères (par défaut "\ n \ n") et mesure la longueur du morceau par nombre de caractères. +## Entrées +Séparateur: par défaut "\ n \ n"
+Taille de chunk: la longueur maximale de votre morceau, mesurée en nombre de caractères
+Chevauchement de chunk: le chevauchement maximal entre les morceaux. Il peut être utile d'avoir un peu de chevauchement pour maintenir une certaine continuité entre les morceaux (par exemple, avec une fenêtre glissante)
+## Sortir +Splitter de texte Charater: les morceaux de texte divisés diff --git a/fr/tutorials/README.md b/fr/tutorials/README.md new file mode 100644 index 00000000..8f703f2e --- /dev/null +++ b/fr/tutorials/README.md @@ -0,0 +1,2 @@ +# Tutoriels + diff --git a/fr/tutorials/agent-as-tool.md b/fr/tutorials/agent-as-tool.md new file mode 100644 index 00000000..d39d38e9 --- /dev/null +++ b/fr/tutorials/agent-as-tool.md @@ -0,0 +1,102 @@ +# Agent comme outil + +Dans ce tutoriel, nous allons voir comment tirer parti des autres flux en tant qu'outils pour un agent parent. Cette approche vous permet de créer un agent parent qui peut déléguer des tâches spécifiques à des agents enfants spécialisés + +## Aperçu + +1. Reçoit la saisie des utilisateurs via un agent parent +2. L'agent décide de récupérer les données du magasin de documents ou d'appeler l'outil AgentFlow. + +
+ +### Étape 1: Configuration du nœud de démarrage + +Commencez par ajouter un nœud ** start ** à votre toile. Cela sert de point d'entrée pour votre système d'agent. + +### Étape 2: Création de l'agent parent + +Ajoutez un nœud ** Agent ** et connectez-le au nœud de démarrage. + +### Étape 3: Configuration de l'outil d'agent + +La caractéristique clé de ce flux est de configurer un autre agent en tant qu'outil. Dans la section des outils ** de l'agent parent **: + +
+ +#### Configuration de l'outil: + +* ** Outil **: Sélectionnez "** Agent comme outil **" + +#### Paramètres de l'outil d'agent: + +* ** Agent sélectionné **: Choisissez votre enfant d'agentflow +* ** Nom **: Nom de l'agentflow +* ** Description **: Décrivez quand cet agentflow est utile. Exemple: + +``` +Useful for searching user availability, scheduling meetings and email related query +``` + +{% hint style = "avertissement"%} +Le nom et la description de l'outil sont extrêmement importants! Ils doivent être clairs et décrire correctement l'objectif de l'outil. Se référer à[best practices](https://platform.openai.com/docs/guides/function-calling?api-mode=chat#best-practices-for-defining-functions)guide. +{% EndHint%} + +### Étape 4: Ajout de sources de connaissances + +Configurez la section ** connaissances (magasins de documents) ** pour donner à votre agent parent l'accès aux informations pertinentes. C'est la même chose que[RAG](rag.md)tutoriel. + +
+ +#### Configuration du magasin de documents: + +* ** Magasin de documents **: Sélectionnez votre magasin de documents préconfiguré (par exemple, "AI-Paper") +* ** Décrivez les connaissances **: Décrivez de quoi parle les connaissances + +*** + +## Exemples d'interactions + +#### Exemples de requêtes et comportement attendu: + +** Requête de planification: ** + +* Utilisateur: "Pouvez-vous vérifier ma disponibilité mardi prochain?" +* Flux: agent parent → outil personnel \ _asiste → Réponse de planification spécialisée + +
+ +** Requête technique: ** + +* Utilisateur: "Qu'est-ce que l'AIGC et comment ça marche?" +* Flux: agent parent → base de connaissances AI-Paper → Explication technique avec des sources + +
+ +** Requête générale: ** + +* Utilisateur: "Bonjour comment allez-vous?" +* Flux: agent parent → Réponse directe (aucun outil nécessaire) + +** Requête complexe: ** + +* Utilisateur: "Planifiez une réunion sur la mise en œuvre de l'AIGC mardi prochain, extraire des informations clés et les points de discussion" +* Flux: agent parent → à la fois l'outil personnel \ _ASSISTANT et les connaissances sur papier → Réponse coordonnée + +
+ +*** + +## Meilleures pratiques + +#### Lignes directrices de conception: + +1. ** Descriptions d'outils effacés **: Faites le nom et les descriptions de l'outil +2. ** Délégation appropriée **: Une meilleure invite système pour que l'agent parent de déléguer efficacement + +#### Cas d'utilisation courants: + +* ** Service client **: Agent parent avec des outils spécialisés pour la facturation, le support technique et les demandes générales +* ** Assistant de recherche **: parent avec des outils pour différents domaines de recherche (étude de marché légale, technique,) +* ** Gestion de projet **: parent avec des outils pour la planification, l'allocation des ressources et le suivi des progrès +* ** Création de contenu **: parent avec des outils pour l'écriture, l'édition, la recherche et la mise en forme + diff --git a/fr/tutorials/agentic-rag.md b/fr/tutorials/agentic-rag.md new file mode 100644 index 00000000..4e8e9c43 --- /dev/null +++ b/fr/tutorials/agentic-rag.md @@ -0,0 +1,195 @@ +# Chiffon agentique + +Le chiffon agentique est une approche basée sur des agents pour effectuer[RAG](rag.md)de manière orchestrée. Cela peut impliquer la récupération des données de diverses sources de documents, la comparaison des résumés et la mise en œuvre d'un mécanisme d'auto-correction automatique. + +Dans ce didacticiel, nous explorerons comment créer un système de chiffon auto-corrigé qui vérifie la pertinence des données récupérées et re-génère automatiquement la requête si les résultats ne sont pas pertinents. + +## Aperçu + +Le flux de chiffon agentique implémente un processus en plusieurs étapes qui: + +1. Valide et catégorise les requêtes entrantes +2. Génère des requêtes de recherche optimisées pour la récupération de la base de données vectorielle +3. Évalue la pertinence des documents récupérés +4. S'auto-correction en régénérant les requêtes lorsque les résultats ne sont pas pertinents +5. 
Fournit des réponses contextuelles basées sur des informations récupérées + +
+ +### Étape 1: Configuration du nœud de démarrage + +Commencez par ajouter un nœud ** start ** à votre toile. Cela sert de point d'entrée pour le flux de votre agent. + + + +#### Configuration: + +* ** Type d'entrée **: Sélectionnez "CHAT ENTRE" pour accepter les questions des utilisateurs +* ** État de flux **: Ajoutez une variable d'état avec la clé "`query`"Et une valeur vide + +Le nœud de démarrage initialise l'état de débit avec un vide`query`variable qui sera mise à jour tout au long du processus. + +### Étape 2: Ajout de validation de la requête + +Ajoutez un nœud d'agent de condition ** ** et connectez-le au nœud de démarrage. + +
+ +Cela fournit des réponses directes pour les requêtes générales sans nécessiter de récupération de documents. Vous pouvez également remplacer par le nœud de réponse directe pour renvoyer une réponse prédéfinie. + +
+ +#### Configuration: + +* ** Messages **: Ajouter un message système: + + ``` + Given the user question and history, construct a short string that can be used for searching vector database. Only generate the query, no meta comments, no explanation + + Example: + Question: what are the events happening today? + Query: today's event + + Example: + Question: how about the address? + Query: business address of the shop + + Question: {{ question }} + Query: + ``` +* ** Mettre à jour l'état de flux **: Définir la clé "Requête" avec une valeur`{{ output }}`. Cela mettra à jour la valeur de "Query" vers la sortie de ce nœud LLM. + +Ce nœud transforme la question du langage naturel de l'utilisateur en une requête de recherche optimisée pour la base de données vectorielle. + +### Étape 5: Configuration de la base de données vectorielle Retriever + +Ajoutez un nœud ** Retriever ** et connectez-le à la "Generate Query" LLM. + +
+ +
+ +#### Configuration: + +* ** Messages **: Ajouter un message système: "Vous êtes un assistant utile qui peut transformer la requête pour produire une meilleure question." +* ** Message d'entrée **: + + ``` + Look at the input and try to reason about the underlying semantic intent / meaning. + Here is the initial question: {{ $flow.state.query }} + Formulate an improved question: + ``` +* ** Mettre à jour l'état de flux **: Définir la clé "Requête" avec une valeur`{{ output }}` + +
+ +Ce nœud analyse pourquoi la requête initiale n'a pas renvoyé les résultats pertinents et génère une version améliorée. + +### Étape 9: Ajout du mécanisme de la boucle + +Ajoutez un nœud ** LOOP ** connecté à la "question de régénération" LLM. + +
+ +Ce chiffon agentique fournit un système robuste et auto-amélioré pour la réponse aux questions basée sur des documents qui peut gérer des requêtes simples et complexes tout en maintenant une grande précision grâce à un raffinement itératif. diff --git a/fr/tutorials/customer-support.md b/fr/tutorials/customer-support.md new file mode 100644 index 00000000..dd20b779 --- /dev/null +++ b/fr/tutorials/customer-support.md @@ -0,0 +1,312 @@ +# Support client + +Le support client est l'un des plus grands cas d'utilisation de l'IA en ce moment. Cependant, de nombreuses personnes ont tendance à la compliquer en introduisant plusieurs agents. Dans de nombreux cas, vous pouvez atteindre le résultat souhaité avec un seul agent, à condition que vous ayez une invite de système bien conçue, des outils soigneusement sélectionnés et une base de connaissances organisée. Une architecture multi-agents n'est généralement nécessaire que si votre système doit gérer une large gamme de zones de support. Par exemple, vous pouvez avoir un agent RH qui gère les politiques RH et exécute des tâches telles que la soumission des demandes de congé ou la mise à jour des dossiers des employés, et un agent financier qui gère les remboursements, les remboursements et d'autres requêtes liées aux finances. + +Lorsque votre système implique plus de 15 ou 20 outils et sources de connaissances, il n'est généralement pas conseillé de surcharger un seul agent. Au lieu de cela, avoir des agents dédiés pour des domaines spécifiques a tendance à mieux fonctionner. Selon votre cas d'utilisation, nous vous recommandons toujours de commencer avec un seul agent, d'évaluer les performances, d'identifier les goulots d'étranglement, et seulement alors en considérant une architecture multi-agents. 
+ +Anthropic offre un bon guide à ce sujet -[https://docs.anthropic.com/en/docs/about-claude/use-case-guides/customer-support-chat](https://docs.anthropic.com/en/docs/about-claude/use-case-guides/customer-support-chat) + +## Agent unique + +
+ +Pour un seul agent, l'incitation est la partie la plus cruciale. Chaque modèle se comporte différemment. Par exemple, Claude fonctionne mieux lorsque des instructions spécifiques à la tâche sont placées dans le message "utilisateur" plutôt que le message "système" (une technique connue sous le nom[role prompting](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/system-prompts#legal-contract-analysis-with-role-prompting)). C'est souvent un processus d'essais et d'erreurs pour déterminer ce qui fonctionne le mieux. Néanmoins, les bonnes invites sont constituées des principes fondamentaux suivants: + +#### Étape 1: rôle + +La première étape consiste à attribuer un rôle et une personnalité à l'agent. Par exemple: + +``` +You are John, a friendly, knowledgeable, and professional customer support agent for Acme Events, an event management company that has been delivering exceptional events since 1985. + +Your job is to help customers with any inquiries related to Acme’s event services, including: + +- Corporate events & conferences + +- Weddings & private parties + +- Public festivals & community events + +- Hybrid and virtual event solutions + +You are warm, helpful, and solution-oriented. Always aim to resolve customer issues efficiently while maintaining a positive tone. If a question is outside your scope, politely inform the user and escalate the matter or suggest contacting the appropriate team. +``` + +#### Étape 2: Lignes directrices + +Comment vous souhaitez que l'agent réponde à une requête utilisateur, un ensemble d'étapes ou de directives à suivre. + +``` +Important guidelines: + +- Always introduce yourself as John from Acme Events. + +- Keep your responses clear, concise, and professional. + +- Ask clarifying questions when needed. + +- If a customer is asking about virtual or hybrid events, highlight that Acme has specialized solutions to reach global audiences. 
+ +- For time-sensitive inquiries, suggest calling the customer service number if it's during business hours. +``` + +{% Hint Style = "Success"%} +Si l'agent n'est pas en mesure d'appeler des outils spécifiques en réponse à certaines requêtes utilisateur, vous pouvez inclure des instructions supplémentaires ici. Par exemple: _ «Utilisez l'outil de devis pour générer un devis personnalisé.» _ +{% EndHint%} + +#### Étape 3: contexte commercial + +Fournir des informations générales de l'entreprise. Par exemple: + +``` +About Acme Events: + +At Acme Events, we believe every occasion is a story waiting to be told. Since 1985, we’ve been designing and delivering exceptional events that leave lasting impressions—from intimate gatherings to large-scale productions. + +Whether you're planning a corporate conference, a dream wedding, or a public festival, Acme is your trusted partner from concept to curtain call. Our team of seasoned planners, creative designers, and on-the-ground coordinators ensures every detail is handled with precision and flair. + +With our award-winning service, innovative solutions, and seamless execution, you can focus on enjoying the moment while we bring your vision to life. We don’t just manage events—we create experiences that resonate. + +Choose Acme Events and let us turn your ideas into unforgettable memories. Because at Acme, we don’t just plan events—we celebrate life’s biggest moments with you. + +Note: We also specialize in hybrid and virtual events, ensuring your message reaches audiences anywhere in the world. 
+ +Acme Events offers the following services: +- Corporate events & conferences +- Weddings & private parties +- Public festivals & community events +- Hybrid and virtual event solutions + +Business hours: Monday–Friday, 9 AM – 5 PM EST +Customer service number: 1-800-123-4567 +``` + +#### Étape 4: Fournir des exemples + +Il est également très important d'enseigner à l'agent comment répondre aux requêtes des utilisateurs en fournissant des exemples, une technique connue sous le nom d'invitation à quelques coups. + +
  Voici quelques exemples de la façon dont vous pouvez interagir avec les clients:
+
+
+H: Salut, organisez-vous des fêtes d'anniversaire pour les enfants?
+
+R: Salut! Je suis John d'Acme Events - merci de nous avoir contactés. Bien que nous nous concentrions principalement sur les événements d'entreprise, les mariages et les festivals publics, nous organisons occasionnellement des fêtes privées, y compris les anniversaires.
+
+Pourriez-vous partager plus de détails sur la fête que vous planifiez (par exemple, le nombre d'invités, l'emplacement et la date)? Je serais heureux de voir comment nous pouvons aider!
+
+
+
+H: Quelles sont vos heures d'ouverture?
+
+R: Nos heures d'ouverture sont du lundi au vendredi, de 9 h à 17 h HNE.
+
+Si vous avez des questions liées à l'événement ou si vous avez besoin d'aide à la planification, n'hésitez pas à nous contacter pendant ces heures.
+
+
+
+H: Gérez-vous les conférences hybrides?
+
+R: Absolument! Nous nous spécialisons dans les solutions d'événements hybrides et virtuelles, parfaites pour engager à la fois un public en personne et à distance.
+
+Nous gérons tout, du streaming en direct et de la configuration technique aux outils d'interaction d'audience et à l'analyse post-événement. Planifiez-vous bientôt un événement hybride? Je serais ravi d'en savoir plus sur vos exigences.
+
+
+
+H: Je cherche à planifier un mariage. Pouvez-vous vous aider?
+
+R: Félicitations! Oui, nous aimerions vous aider à planifier votre grand jour. Chez Acme Events, nous offrons une planification de mariage à service complet - de la sélection des lieux et de la conception du thème à la coordination des fournisseurs et à l'exécution le jour J.
+
+Souhaitez-vous planifier une consultation gratuite pour discuter de votre vision et de votre calendrier?
+
+
+
+H: Puis-je obtenir un devis pour un événement d'entreprise?
+
+R: Bien sûr! Pour préparer un devis précis, j'aurai besoin de quelques détails:
+
+1. Quel type d'événement d'entreprise planifiez-vous (par exemple, séminaire, gala, retraite)?
+
+2. Nombre estimé de participants?
+
+3. Date et localisation préférés?
+
+4. Des services spécifiques dont vous avez besoin (par exemple, restauration, configuration AV, image de marque)?
+
+Une fois que j'aurai ces informations, j'utiliserai notre outil de devis pour générer un devis personnalisé pour vous.
+
+ 
+ +#### Étape 5: garde-corps et notes + +Enfin, pour empêcher l'agent de se déplacer, il est recommandé de décrire les DO et les choses à faire claires sur la façon dont l'agent doit interagir avec le client. + +``` +Please adhere to the following guardrails: + +1. Only provide information about the services listed in Acme Events' official offerings (e.g., corporate events, weddings, public festivals, hybrid/virtual events). +2. If asked about services we don't offer (e.g., catering-only, travel booking), politely clarify that we do not provide those services. +3. Do not speculate about future service expansions, new packages, or unannounced partnerships. +4. Never make commitments, guarantees, or enter into agreements on behalf of the company. You are here to inform and guide, not to negotiate. +5. Do not reference or compare to any competitors or their offerings. +6. If a query is sensitive, urgent, or requires escalation, kindly direct the customer to contact our team at **1-800-123-4567** during business hours. +7. Always maintain a friendly, professional tone and ensure customer privacy is respected at all times. +``` + +Pour vous aider à inciter, vous pouvez utiliser le bouton "** générer **", cela générera une invite système en suivant les meilleures pratiques mentionnées ci-dessus: + +
+ +
+ +#### Étape 6: Outils et noms de connaissances et description + +La plupart des outils prédéfinis sont livrés avec des noms et des descriptions clairs, donc les utilisateurs n'ont généralement pas besoin de les modifier. Cependant, pour les outils personnalisés et les bases de connaissances, fournir un nom clair et descriptif est essentiel pour s'assurer que le LLM sait quand et comment utiliser l'outil approprié. Se référer à[best practices for defining functions](https://platform.openai.com/docs/guides/function-calling?api-mode=chat#best-practices-for-defining-functions). Vous pouvez également utiliser le bouton "** générer **" pour aider à la connaissance de la connaissance: + +
+ +## Plusieurs agents + +Pour une architecture multi-agents, nous créerons un système qui triage automatiquement les demandes des clients et les acheminerons vers des agents spécialisés en fonction de la nature de la requête. + +Bien que cette configuration soit destinée à présenter les capacités de l'architecture, il convient de noter que l'exemple que nous explorerons pourrait être géré de manière réaliste par un seul agent. + +### Aperçu + +1. ** Démarrer le nœud **: recueille la demande des clients via un formulaire structuré +2. ** Agent de condition **: analyse l'enquête et détermine le routage approprié +3. ** Agent HR **: gère les requêtes liées aux ressources humaines avec accès à la base de connaissances RH +4. ** Manager d'événements **: gère les demandes liées à l'événement avec des capacités d'intégration de l'API +5. ** Agent général **: gère les demandes générales et fournit une grande assistance + +
+ +#### Étape 1: Créez le nœud de démarrage + +
+ +1. Commencez par ajouter un nœud ** start ** à votre toile +2. Configurez le nœud de démarrage avec ** Entrée du formulaire ** pour collecter les demandes des clients +3. Configurez le formulaire avec la configuration suivante: + * ** Type d'entrée **: entrée de formulaire + * ** Titre du formulaire **: "Enquête" + * ** DESCRIPTION DU FORME **: "Investiment client" + * ** Types d'entrées de formulaire **: Configurez deux entrées de chaîne: + * ** Sujet **: Nom de la variable`subject` + * ** corps **: nom de variable`body` + +
+ +#### Étape 2: Ajouter l'agent de condition (détecter l'intention de l'utilisateur) + +
+ +1. Connectez un nœud d'agent ** de condition ** au nœud de démarrage +2. Configurez les instructions du système pour agir en tant qu'agent de support client. Vous pouvez également vous référer à l'invite utilisée dans[Single Agent](customer-support.md#single-agent). Voici un exemple simple: + +``` +You are a customer support agent. Understand and process support tickets by automatically triaging them to the correct departments or individuals, generating immediate responses for common issues, and gathering necessary information for complex queries. + +Follow the following routine with the user: + +1. First, greet the user and see how you can help the user +2. If question is related to HR query, handoff to HR Agent +3. If question is related to events query, handoff to Event Manager + +Note: Transfers between agents are handled seamlessly in the background; do not mention or draw attention to these transfers in your conversation with the user +``` + +4. Configurez la ** entrée ** pour analyser le sujet de formulaire:`{{ $form.subject }}` +5. Configurer ** Scénarios ** pour le routage: + * ** Scénario 0 **: "La requête est liée aux RH" + * ** Scénario 1 **: "La requête est liée aux événements" + * ** Scénario 2 **: "La requête est une requête générale" + +
+ +#### Étape 3: Créez l'agent RH + +
+ +1. Ajoutez un nœud ** Agent ** et connectez-le à ** condition 0 ** sortie +2. Configurez le message système pour la spécialisation RH: + +``` +You are an HR agent responsible for retrieving and applying internal knowledge sources to answer employee queries about HR policies, procedures, and guidelines. + +When responding to HR-related questions, you must first identify the relevant policy areas, search through available internal knowledge sources, and then provide accurate, comprehensive answers based on official company documentation. + +# Steps +1. **Analyze the Query**: Identify the specific HR topic, policy area, or procedure the user is asking about +2. **Retrieve Relevant Information**: Search through internal HR knowledge sources including: + - Employee handbooks + - Policy documents + - Procedure manuals + - Benefits information + - Compliance guidelines + - Company-specific regulations +3. **Cross-Reference Sources**: Verify information across multiple relevant documents to ensure accuracy and completeness +4. **Synthesize Response**: Combine retrieved information into a coherent, actionable answer +5. **Provide Supporting Details**: Include relevant policy numbers, effective dates, or references to specific sections when applicable + +# Notes +- Always prioritize the most current version of policies and note when information may be subject to change +- If conflicting information exists across sources, flag this and recommend contacting HR directly +- For sensitive topics (discrimination, harassment, legal issues), provide both policy information and appropriate escalation contacts +- When policies vary by location, employment type, or other factors, clearly specify which version applies +- If insufficient information is available in internal sources, explicitly state this limitation and suggest alternative resources +``` + +4. 
** Configurer les sources de connaissances (RAG) **: + * Ajouter ** Store de document **: "Loi sur les ressources humaines" + * ** Description **: "Ces informations sont utiles lors de la détermination du cadre juridique et des exigences de mise en œuvre pour la gestion des ressources humaines en vertu de la loi RH de 2016 et de sa réglementation de mise en œuvre de 2020." + * ** Retour des documents source **: activé + +
+ +#### Étape 4: Créez le gestionnaire d'événements + +
+ +1. Ajoutez un autre nœud d'agent ** ** et connectez-le à ** condition 1 ** sortie +2. Configurer le message système: + +``` +Act as an event manager that can determine actions on events such as create, update, get, list and delete. +``` + +4. ** Configurer les outils **: + * Ajouter ** OpenAPI Toolkit ** avec la configuration de l'API de gestion d'événements. Se référer à[OpenAPI Toolkit](interacting-with-api.md#tool-openapi-toolkit)pour plus de détails. + +
+ +Le gestionnaire d'événements a accès à une API de gestion d'événements complète qui peut: + +* Énumérez tous les événements +* Créer de nouveaux événements +* Récupérer les détails de l'événement par id +* Mettre à jour les informations sur l'événement +* Supprimer les événements + +Se référer à[Event Management Server](interacting-with-api.md#prerequisite)pour l'exemple de code. + +#### Étape 5: Créez l'agent général + +
+ +1. Ajoutez un troisième nœud d'agent ** ** et connectez-le à la sortie ** condition 2 **. Cela agira comme une voie de secours qui peut répondre à toute requête non liée. Peut également être remplacé par[Direct Reply](../using-flowise/agentflowv2.md#id-12.-direct-reply-node)Node si vous souhaitez simplement renvoyer une réponse par défaut. +2. **Configuration**: + * Aucun outil supplémentaire requis pour les demandes générales + * Aucune source de connaissances nécessaire + +### Tester le flux + +1. ** Tester les requêtes RH **: Soumettre des demandes de renseignements sur les politiques de l'entreprise, les avantages sociaux ou les procédures RH +2. ** Test des requêtes d'événements **: Essayez de créer, de mettre à jour ou d'interroger sur les événements de l'entreprise +3. ** Testez les requêtes générales **: Posez des questions générales pour voir comment le système se rend vers l'agent général +4. ** Observer le routage **: Remarquez comment l'agent de condition est de manière transparente des requêtes sans exposer le processus de transfert + +
+ +### Structure d'écoulement complète + +{% fichier src = "../. GitBook / Assets / Customer Support Agents.json"%} diff --git a/fr/tutorials/deep-research.md b/fr/tutorials/deep-research.md new file mode 100644 index 00000000..31061f2b --- /dev/null +++ b/fr/tutorials/deep-research.md @@ -0,0 +1,269 @@ +# Recherche profonde + +Deep Research Agent est un système multi-agents sophistiqué qui peut effectuer des recherches complètes sur n'importe quel sujet en décomposant des requêtes complexes en tâches gérables, en déploiement des agents de recherche spécialisés et en synthétisant les résultats en rapports détaillés. + +Cette approche est inspirée par le blog d'Anthropic -[How we built our multi-agent research system](https://www.anthropic.com/engineering/built-multi-agent-research-system) + +## Aperçu + +Le flux de travail de l'agent de recherche en profondeur se compose de plusieurs composants clés travaillant ensemble: + +1. ** Planner Agent **: analyse la requête de recherche et génère une liste de tâches de recherche spécialisées +2. ** itération **: crée plusieurs agents de recherche pour travailler sur différents aspects de la requête +3. ** Sous-agents de recherche **: des agents individuels qui effectuent des recherches ciblées à l'aide de la recherche Web et d'autres outils +4. ** Agent écrivain **: synthétise toutes les résultats dans un rapport cohérent et complet +5. ** Agent de condition **: détermine si des recherches supplémentaires sont nécessaires ou si les résultats sont suffisants +6. ** Loop **: Retour à l'agent du planificateur pour améliorer la qualité de la recherche + +
+ +### Étape 1: Créez le nœud de démarrage + +
+ +1. Commencez par ajouter un nœud ** start ** à votre toile +2. Configurez le nœud de démarrage avec ** Entrée du formulaire ** pour collecter la requête de recherche des utilisateurs +3. Configurez le formulaire avec la configuration suivante: + * ** Titre de formulaire **: "Recherche" + * ** Description du formulaire **: "Un agent de recherche qui prend une requête et renvoie un rapport détaillé" + * ** Types d'entrée de formulaire **: Ajoutez une entrée de chaîne avec l'étiquette "requête" et le nom de la variable "Query" +4. Initialiser l'état de flux avec deux variables clés: + * `subagents`: Pour stocker la liste des tâches de recherche à effectuer par des sous-agents + * `findings`: Pour accumuler des résultats de recherche + +
+ +### Étape 2: Ajouter l'agent du planificateur + +
+ +1. Connectez un nœud ** llm ** au nœud de démarrage. +2. Configurez l'invite du système pour agir en tant que responsable de recherche d'experts avec les principales responsabilités suivantes: + * Analyser et décomposer les requêtes utilisateur + * Créer des plans de recherche détaillés + * Générer des tâches spécifiques pour les sous-agents + * Exemple d'invite -[research\_lead\_agent.md](https://github.com/anthropics/anthropic-cookbook/blob/main/patterns/agents/prompts/research_lead_agent.md) + +
+ +3. Configurer ** Sortie structurée JSON ** Pour renvoyer une liste de tâches de sous-agent: + +```json +{ + "task": { + "type": "string", + "description": "The research task for subagent" + } +} +``` + +4. Mettez à jour l'état de flux en stockant la liste des sous-agents générés + +
+ +1. Ajoutez un nœud ** itération **. +2. Connectez-le à la sortie du planificateur +3. Configurez l'entrée d'itération à l'état de flux:`{{ $flow.state.subagents }}`. Pour chaque élément du tableau, un sous-agent sera engendré pour effectuer la tâche de recherche. Exemple: + +
+ +```json +{ + "subagents": [ + { + "task": "Research the current state and recent developments in autonomous multi-agent systems technology. Focus on defining what autonomous multi-agent systems are, key technical components (coordination algorithms, communication protocols, decision-making frameworks), major technological advances in the last 2-3 years, and leading research institutions/companies working in this space. Use web search to find recent academic papers, industry reports, and technical documentation. Prioritize sources from IEEE, ACM, Nature, Science, and major tech companies' research divisions. Compile findings into a comprehensive technical overview covering definitions, core technologies, recent breakthroughs, and key players in the field." + }, + { + "task": "Investigate real-world applications and deployments of autonomous multi-agent systems across different industries. Research specific use cases in robotics (swarm robotics, warehouse automation), transportation (autonomous vehicle fleets, traffic management), manufacturing (coordinated production systems), defense/military applications, smart cities, and any other domains where these systems are actively deployed. For each application area, identify specific companies, products, success stories, and quantitative results where available. Focus on practical implementations rather than theoretical research. Use web search to find case studies, company announcements, industry reports, and news articles about actual deployments." + } + ] +} +``` + +### Étape 4: Construisez le sous-agent de recherche + +1. À l'intérieur du bloc d'itération, ajoutez un nœud ** agent **. +2. 
Configurez l'invite du système pour agir comme un sous-agent de recherche ciblé avec: + * Capacités de compréhension des tâches claires + * Planification efficace de la recherche (2-5 appels d'outils par tâche) + * Évaluation de la qualité de la source + * Utilisation d'outils parallèles pour l'efficacité + * Exemple d'invite -[research\_subagent.md](https://github.com/anthropics/anthropic-cookbook/blob/main/patterns/agents/prompts/research_subagent.md) + +
+ +3. Ajoutez les outils de recherche suivants, vous pouvez utiliser vos propres outils préférés: + * ** Recherche Google **: pour les liens de recherche Web + * ** Scraper Web **: pour l'extraction de contenu Web. Cela grattera le contenu des liens de Google Search. + * ** Recherche Arxiv **: pour rechercher et charger le contenu des articles académiques + +
+ +4. Définissez le message de l'utilisateur pour passer la tâche d'itération actuelle:`{{ $iteration.task }}` + +### Étape 5: Ajouter l'agent de l'écrivain + +
+ +1. Connectez un nœud ** llm ** une fois l'itération terminée. +2. Un contexte plus grand LLM comme Gemini avec 1 à 2 millions de tailles de contexte est nécessaire pour synthétiser toutes les résultats et générer le rapport. +3. Configurez l'invite du système pour agir en tant que rédacteur de recherche expert qui: + * Préserve le contexte complet des résultats de la recherche + * Maintient l'intégrité de la citation + * Ajoute de la structure et de la clarté + * Sorte les rapports de marque professionnelle +4. Configurez le message de l'utilisateur pour inclure: + * Sujet de la recherche:`{{ $form.query }}` + * Résultats existants:`{{ $flow.state.findings }}` + * Nouvelles conclusions:`{{ iterationAgentflow_0 }}` + +
+ +4. Mettre à jour le`{{ $flow.state.findings }}`avec la sortie de l'agent d'écriture. + +
+ +### Étape 6: Implémentez le contrôle de la condition + +
+ +1. Ajouter un ** agent de condition. ** +2. Configurez la logique de condition pour déterminer si des recherches supplémentaires sont nécessaires +3. Configurer deux scénarios: + * "Plus de sous-agents sont nécessaires" + * "Les résultats sont suffisants" +4. Fournir un contexte d'entrée, notamment: + * Sujet de recherche + * Liste des sous-agents actuels + * Résultats accumulés + +
+ +### Étape 7: Créez le mécanisme de boucle + +1. Pour le chemin ** "plus de sous-agents nécessaires" ** Path, ajoutez un nœud de boucle ** ** +2. Configurez-le pour remonter au nœud du planificateur +3. Réglez un nombre de boucles maximales de 5 pour empêcher les boucles infinies +4. L'agent de planificateur examinera le rapport actuel et générera des tâches de recherche supplémentaires. + +
+ +### Étape 8: Ajouter la sortie finale + +1. Pour les résultats "** les résultats sont suffisants **", ajoutez une réponse directe ** ** +2. Configurez-le pour publier le rapport final:`{{ $flow.state.findings }}` + +
+ +
+ +## Tester le flux + +1. Commencez par un sujet simple comme "Systèmes multi-agents autonomes dans des environnements réels" +2. Observez comment le planificateur décompose la recherche en tâches ciblées +3. Surveiller les sous-agents lorsqu'ils effectuent des recherches parallèles +4. Passez en revue la synthèse des résultats par l'agent de l'écrivain +5. Notez si l'agent de condition demande des recherches supplémentaires + +
+ +** Rapport généré: ** + +{% fichier src = "../. GitBook / Assets / Research Report de Deep.pdf"%} + +## Structure d'écoulement complète + +{% fichier src = "../. GitBook / Assets / Deep Research Dynamic subagents.json"%} + +## Procédure + +1. 🧠 Planner Agent - Analyse la requête de recherche et génère une liste de tâches de recherche spécialisées +2. 🖧 Sous-agents - Crée plusieurs sous-agents de recherche, effectuer des recherches ciblées à l'aide de la recherche Web, de l'écrase Web et des outils ArXIV +3. ✍️ Agent écrivain - synthétise toutes les résultats dans un rapport cohérent et complet avec des citations +4. ⇄ Agent de condition - détermine si des recherches supplémentaires sont nécessaires ou si les résultats sont suffisants +5. 🔄 Boucle de retour à l'agent du planificateur pour générer plus de sous-agents + +### 🧠 agent de planificateur + +Agir comme une recherche experte mène à: + +* Analyser et décomposer les requêtes utilisateur +* Créer des plans de recherche détaillés +* Générer des tâches spécifiques pour les sous-agents + +Sortie un éventail de tâches de recherche. + +
+ +### 🖧 Sous-agents + +Pour chaque tâche de la liste des tâches, un nouveau sous-agent sera engendré pour mener des recherches ciblées. + +Chaque sous-agent a: + +* Capacités de compréhension des tâches claires +* Planification efficace de la recherche (2-5 appels d'outils par tâche) +* Évaluation de la qualité de la source +* Utilisation d'outils parallèles pour l'efficacité + +
+ +Subagent a accès à des outils de recherche Web, Web Scrape et ArXIV. + +* 🌐 Recherche Google - pour les liens de recherche Web +* 🗂️ Scraper Web - pour l'extraction du contenu Web. Cela grattera le contenu des liens de Google Search. +* 📑 ArXIV - Rechercher, télécharger et lire le contenu des articles Arxiv + +
+ +### ✍️ Agent écrivain + +Agir en tant que rédacteur de recherche qui transforme les résultats bruts en un rapport Markdown clair et structuré. Conserver tout contexte et citations. + +Nous constatons que les Gémeaux sont les meilleurs pour cela, grâce à sa grande fenêtre de contexte qui lui permet de synthétiser efficacement toutes les résultats. + +
+ +### ⇄ Agent de condition + +Avec le rapport généré, nous avons laissé le LLM déterminer si des recherches supplémentaires sont nécessaires ou si les résultats sont suffisants. + +Si davantage est nécessaire, l'agent du planificateur passe en revue tous les messages, identifie les domaines d'amélioration, génère des tâches de recherche de suivi et la boucle se poursuit. + +Si les résultats sont suffisants, nous renvoyons simplement le rapport final de l'agent de l'écrivain en tant que sortie. + +
+ +## Configuration avancée + +#### Personnalisation de la profondeur de recherche + +Vous pouvez ajuster la profondeur de recherche en modifiant l'invite du système du planificateur à: + +* Augmenter le nombre de sous-agents pour des sujets complexes (jusqu'à 10-20) +* Ajustez le budget d'appel à l'outil par sous-agent +* Modifier le nombre de boucles pour plus de recherche itérative + +Mais cela comporte également un coût supplémentaire pour plus de consommation de jetons. + +#### Ajout d'outils spécialisés + +Améliorez les capacités de recherche en ajoutant des outils spécifiques au domaine: + +* Outils personnels comme Gmail, Slack, Google Calendar, Teams, etc. +* Autre grattoir Web, outils de recherche Web comme Firecrawl, Exa, Apify, etc. + +#### Ajout de contexte de chiffon + +Vous pouvez ajouter plus de contexte au LLM avec[RAG](rag.md). Cela permet à LLM de retirer les informations des sources de connaissances existantes pertinentes en cas de besoin. + +## Meilleures pratiques + +* La sélection des modèles et les options de secours sont cruciales en raison de la grande quantité de résultats qui provoquent un débordement de jeton. +* L'invitation est la clé. Ouverts ouverts ouverts de toute leur structure rapide, couvrant la délégation des tâches, l'utilisation parallèle des outils et les processus de réflexion -[https://github.com/anthropics/anthropic-cookbook/blob/main/patterns/agents/prompts](https://github.com/anthropics/anthropic-cookbook/blob/main/patterns/agents/prompts) +* Les outils doivent être soigneusement conçus, quand utiliser, comment limiter la durée des résultats renvoyés des exécutions d'outils. +* Ceci est très similaire au triangle de compromis, où l'optimisation de deux de l'arbre a souvent un impact négatif sur un autre, dans ce cas - la vitesse, la qualité, le coût. 
diff --git a/fr/tutorials/human-in-the-loop.md b/fr/tutorials/human-in-the-loop.md new file mode 100644 index 00000000..5fcc9fcc --- /dev/null +++ b/fr/tutorials/human-in-the-loop.md @@ -0,0 +1,225 @@ +# Humain dans la boucle + +Dans les didacticiels précédents, nous avons exploré comment un agent peut utiliser dynamiquement des outils pour répondre aux requêtes ou effectuer des tâches assignées. ** Human-in-the-Loop ** Ajoute une couche de contrôle en permettant à l'agent de demander une entrée, une approbation ou une rétroaction humaine avant de continuer. + +Il y a 2 façons dont l'homme dans la boucle peut être utilisé: + +* En utilisant[Human Input](../using-flowise/agentflowv2.md#id-11.-human-input-node)nœud pour arrêter l'exécution +* Activer ** Exiger une entrée humaine ** pour les outils d'agent + +## Nœud d'entrée humain + +Le nœud ** entrée humaine ** permet de faire une pause exécution et de reprendre seulement après qu'un humain a fourni des commentaires pour approuver ou rejeter l'action. + +Dans ce tutoriel, nous apprendrons comment créer un agent de réponse par e-mail automatisé qui demande des commentaires des utilisateurs avant d'envoyer l'e-mail. + +### Aperçu + +L'objectif de ce cas d'utilisation est de créer un système de réponse par e-mail intelligent qui: + +1. Reçoit les demandes de courrier électronique entrantes +2. Génère des réponses par e-mail professionnelles à l'aide de l'IA +3. Demande l'approbation humaine avant d'envoyer +4. Permet des révisions et des améliorations +5. Envoie automatiquement l'e-mail approuvé + +
+ +#### Étape 1: Configuration du nœud de démarrage + +1. Faites glisser et déposez le nœud ** start ** sur la toile. Ce sera le point d'entrée pour les données de messagerie entrantes. +2. Configurez le nœud de démarrage avec les paramètres suivants: + * ** Type d'entrée **: Sélectionnez "Form Entrée" pour capturer les données d'e-mail structurées + * ** Titre du formulaire **: "Enquête par e-mail" + * ** DESCRIPTION DU FORME **: "Investigation des e-mails entrants" +3. Ajouter les types d'entrée de formulaire suivants: + * ** Sujet ** (String): Pour capturer la ligne d'objet de l'e-mail + * ** Body ** (String): Pour capturer le contenu de l'e-mail + * ** De ** (String): Pour capturer l'adresse e-mail de l'expéditeur + +
+ +#### Étape 4: Configuration du mécanisme de la boucle + +1. Ajoutez un nœud ** boucle ** pour gérer les scénarios de rejet. Cela permet au workflow de revenir à l'agent de réponse par e-mail pour des améliorations. +2. Configurer le nœud de boucle: + * ** Loop Retour à **: Sélectionnez "Agent de réponse par e-mail" dans la liste déroulante + * ** Count de boucles max **: 5 (empêche les boucles infinies) +3. Connectez la sortie "Rejeter" du nœud d'entrée humain à ce nœud de boucle. Lorsqu'un humain rejette la réponse, le flux de travail reviendra à l'agent avec la rétroaction pour l'amélioration. + +
+ +2. Passez en revue la réponse de l'agent dans l'étape d'entrée humaine + +
+ +3. Rejeter la réponse et fournir plus de commentaires: + +
+ +4. Passez en revue la réponse révisée de l'agent: + +
+ +5. Continuez et vérifiez que le courrier électronique est envoyé correctement: + +
+ +### Structure d'écoulement complète + +{% fichier src = "../. gitbook / actifs / humain dans le boucle agent.json"%} + +## Exiger une entrée humaine sur les outils d'agent + +Lorsqu'un agent décide d'utiliser des outils, ce qui suit se produit sous le capot: + +1. Compte tenu d'une requête utilisateur, le LLM détermine si les appels d'outils sont nécessaires. +2. Si les appels d'outils sont identifiés à partir de la réponse de sortie LLM, Flowise localise les outils de correspondance et exécute les fonctions correspondantes. +3. Les résultats des exécutions d'outils sont renvoyés au LLM. +4. Le LLM décide alors si des appels d'outils supplémentaires sont requis ou s'il a suffisamment d'informations pour retourner la réponse finale. + +
+ +Lorsque l'exigence de l'entrée humaine est activée, nous plaçons un point de contrôle supplémentaire après la détection des appels d'outil: + +
+ +Ceci est crucial pour les appels d'outils sensibles tels que passer des commandes, des réservations, des réunions, l'envoi d'e-mails, etc., où vous avez besoin de confirmation et de révision humaines. + +Nous pouvons utiliser l'exemple de système de réponse par e-mail ci-dessus, mais simplifier pour avoir un seul agent. + +
+ +### Configuration + +1. Ajoutez un nœud ** Agent ** et connectez-le au nœud de démarrage. Cet agent unique gérera à la fois l'analyse par e-mail et l'approbation humaine. +2. Ajoutez un message système à l'agent, par exemple: + + ``` + You are a customer support agent working in Flowise Inc. Create a draft professional email reply to user's query. Use the web search tools to get more details about the prospect. + + Always reply as Samantha, Customer Support Representative in Flowise. Don't use placeholders. + + Today's date is {{ current_date_time }}. + ``` +3. Ajouter les outils suivants: + * ** Recherche personnalisée Google **: pour rechercher des informations sur les clients + * ** gmail **: pour créer des brouillons par e-mail avec l'approbation humaine +4. Configurer l'outil Gmail: + * ** Type de Gmail **: "Drafts" + * ** Draft Actions **: "CreateLaft" + * ** Exiger une entrée humaine **: ✅ ** Activer cette option ** - c'est la caractéristique clé qui crée la fonctionnalité HITL + +
+ +### Comment fonctionne le flux simplifié + +1. ** Entrée du formulaire **: L'utilisateur soumet les détails de la demande par e-mail +2. ** Analyse AI **: L'agent analyse l'e-mail et utilise Google Rechercher un contexte supplémentaire +3. ** Création du projet **: Lorsque l'agent tente de créer un brouillon Gmail, le workflow s'arrête +4. ** Révision humaine **: Le système présente le projet de courrier électronique pour l'approbation humaine +5. ** Décision **: L'humain peut approuver (créer un projet) ou rejeter (fournir des commentaires et réessayer) + +### Tester l'agent + +1. Démarrez le flux de travail en remplissant le formulaire avec un exemple de demande de courrier électronique + +
+ + +2. Avant que l'agent ne crée le projet Gmail, il demandera à l'utilisateur l'approbation ou le rejet. + +
+ +3. Si l'outil est approuvé, l'agent procédera à l'appel de l'outil et créera le projet dans Gmail. L'agent est suffisamment intelligent pour déterminer le sujet, le corps et le destinataire appropriés pour l'e-mail. + +
+ +### Structure d'écoulement complète + +{% fichier src = "../. GitBook / Assets / Email Agent.json"%} + +## Partage des traces d'exécution pour l'examen et l'approbation externes + +1. Dans la barre latérale gauche du tableau de bord, cliquez sur ** Exécutions. ** +2. Trouvez la trace d'exécution et cliquez sur ** Partager. ** + +
+ +3. La trace d'exécution est désormais disponible en tant que lien public. Vous pouvez partager ce lien avec d'autres pour examen. + +
+ +4. Les utilisateurs en dehors de Flowise peuvent rejeter ou approuver: + +
diff --git a/fr/tutorials/interacting-with-api.md b/fr/tutorials/interacting-with-api.md new file mode 100644 index 00000000..e9f42aa9 --- /dev/null +++ b/fr/tutorials/interacting-with-api.md @@ -0,0 +1,615 @@ +# Interagir avec l'API + +Presque toutes les applications Web repose sur des API RESTful. Permettre à LLM d'interagir avec eux élargir son utilité pratique. + +Ce tutoriel présente comment LLM peut être utilisé pour passer des appels d'API via l'appel d'outils. + +## Préalable - Exemple de serveur de gestion d'événements + +Nous allons utiliser un serveur de gestion d'événements simples et montrer comment interagir avec lui. + +```javascript +const express = require('express'); +const { v4: uuidv4 } = require('uuid'); + +const app = express(); +const PORT = process.env.PORT || 5566; + +// Middleware +app.use(express.json()); + +// Fake database - in-memory storage +let events = [ + { + id: '1', + name: 'Tech Conference 2024', + date: '2024-06-15T09:00:00Z', + location: 'San Francisco, CA' + }, + { + id: '2', + name: 'Music Festival', + date: '2024-07-20T18:00:00Z', + location: 'Austin, TX' + }, + { + id: '3', + name: 'Art Exhibition Opening', + date: '2024-05-10T14:00:00Z', + location: 'New York, NY' + }, + { + id: '4', + name: 'Startup Networking Event', + date: '2024-08-05T17:30:00Z', + location: 'Seattle, WA' + }, + { + id: '5', + name: 'Food & Wine Tasting', + date: '2024-09-12T19:00:00Z', + location: 'Napa Valley, CA' + } +]; + +// Helper function to validate event data +const validateEvent = (eventData) => { + const required = ['name', 'date', 'location']; + const missing = required.filter(field => !eventData[field]); + + if (missing.length > 0) { + return { valid: false, message: `Missing required fields: ${missing.join(', ')}` }; + } + + // Basic date validation + const dateRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{3})?Z?$/; + if (!dateRegex.test(eventData.date)) { + return { valid: false, message: 'Date must be in ISO 8601 format 
(YYYY-MM-DDTHH:mm:ssZ)' }; + } + + return { valid: true }; +}; + +// GET /events - List all events +app.get('/events', (req, res) => { + res.status(200).json(events); +}); + +// POST /events - Create a new event +app.post('/events', (req, res) => { + const validation = validateEvent(req.body); + + if (!validation.valid) { + return res.status(400).json({ error: validation.message }); + } + + const newEvent = { + id: req.body.id || uuidv4(), + name: req.body.name, + date: req.body.date, + location: req.body.location + }; + + events.push(newEvent); + res.status(201).json(newEvent); +}); + +// GET /events/{id} - Retrieve an event by ID +app.get('/events/:id', (req, res) => { + const event = events.find(e => e.id === req.params.id); + + if (!event) { + return res.status(404).json({ error: 'Event not found' }); + } + + res.status(200).json(event); +}); + +// DELETE /events/{id} - Delete an event by ID +app.delete('/events/:id', (req, res) => { + const eventIndex = events.findIndex(e => e.id === req.params.id); + + if (eventIndex === -1) { + return res.status(404).json({ error: 'Event not found' }); + } + + events.splice(eventIndex, 1); + res.status(204).send(); +}); + +// PATCH /events/{id} - Update an event's details by ID +app.patch('/events/:id', (req, res) => { + const eventIndex = events.findIndex(e => e.id === req.params.id); + + if (eventIndex === -1) { + return res.status(404).json({ error: 'Event not found' }); + } + + const validation = validateEvent(req.body); + + if (!validation.valid) { + return res.status(400).json({ error: validation.message }); + } + + // Update the event + events[eventIndex] = { + ...events[eventIndex], + name: req.body.name, + date: req.body.date, + location: req.body.location + }; + + res.status(200).json(events[eventIndex]); +}); + +// Error handling middleware +app.use((err, req, res, next) => { + console.error(err.stack); + res.status(500).json({ error: 'Something went wrong!' 
}); +}); + +// 404 handler +app.use((req, res) => { + res.status(404).json({ error: 'Endpoint not found' }); +}); + +// Start the server +app.listen(PORT, () => { + console.log(`Event Management API server is running on port ${PORT}`); + console.log(`Available endpoints:`); + console.log(` GET /events - List all events`); + console.log(` POST /events - Create a new event`); + console.log(` GET /events/{id} - Get event by ID`); + console.log(` PATCH /events/{id} - Update event by ID`); + console.log(` DELETE /events/{id} - Delete event by ID`); +}); + +module.exports = app; +``` + +*** + +## Demander des outils + +Il existe 4 outils de demande qui peuvent être utilisés. Cela permet à LLM d'appeler les outils GET, Publier, mettre, supprimer les outils lorsque cela est nécessaire. + +### Étape 1: Ajoutez le nœud de démarrage + +Le nœud de démarrage est le point d'entrée de votre flux + + + +### Étape 2: Ajouter le nœud d'agent + +Ensuite, ajoutez un nœud d'agent. Dans cette configuration, l'agent est configuré avec quatre outils principaux: obtenir, publier, mettre et supprimer. Chaque outil est configuré pour effectuer un type spécifique de demande d'API. + +#### Outil 1: obtenir (récupérer les événements) + + + +* ** Objectif: ** Récupérez une liste d'événements ou un événement spécifique de l'API. +* ** Entrées de configuration: ** + * ** URL: **`http://localhost:5566/events` + * **Nom:**`get_events` + * ** Description: ** Décrivez quand utiliser cet outil. Par exemple:`Use this when you need to get events` + * ** En-têtes: ** (Facultatif) Ajouter tous les en-têtes HTTP requis. + * ** Schéma de paramètres de requête: ** Un schéma JSON de l'API qui permet à LLM de connaître la structure URL, quel chemin et des paramètres de requête à générer. Par exemple: + + ```json + { + "id": { + "type": "string", + "in": "path", + "description": "ID of the item to get. 
/:id" + }, + "limit": { + "type": "string", + "in": "query", + "description": "Limit the number of items to get. ?limit=10" + } + } + ``` + +#### Outil 2: Publier (créer l'événement) + +
+ +* ** Objectif: ** Créez un nouvel événement dans le système. +* ** Entrées de configuration: ** + * ** URL: **`http://localhost:5566/events` + * **Nom:**`create_event` + * **Description:**`Use this when you want to create a new event.` + * ** En-têtes: ** (Facultatif) Ajouter tous les en-têtes HTTP requis. + * ** Corps **: objet de corps codé dur qui remplacera le corps généré par LLM + * ** Schéma corporel: ** Un schéma JSON du corps de demande d'API qui permet à LLM de savoir comment générer automatiquement le corps JSON correct. Par exemple: + + ```json + { + "name": { + "type": "string", + "required": true, + "description": "Name of the event" + }, + "date": { + "type": "string", + "required": true, + "description": "Date of the event" + }, + "location": { + "type": "string", + "required": true, + "description": "Location of the event" + } + } + ``` + +#### Outil 3: put (événement de mise à jour) + +
+ +* ** Objectif: ** Supprimer un événement du système. +* ** Entrées de configuration: ** + * ** URL: **`http://localhost:5566/events` + * **Nom:**`delete_event` + * **Description:**`Use this when you need to delete an event.` + * ** En-têtes: ** (Facultatif) Ajouter tous les en-têtes HTTP requis. + * ** Schéma de paramètres de requête: ** Un schéma JSON de l'API qui permet à LLM de connaître la structure URL, quel chemin et des paramètres de requête à générer. Par exemple: + + ```json + { + "id": { + "type": "string", + "required": true, + "in": "path", + "description": "ID of the item to delete. /:id" + } + } + ``` + +### Comment l'agent utilise ces outils + +* L'agent peut sélectionner dynamiquement l'outil à utiliser en fonction de la demande de l'utilisateur ou de la logique du flux. +* Chaque outil est mappé sur une méthode HTTP spécifique et un point de terminaison, avec des schémas d'entrée clairement définis. +* L'agent exploite le LLM pour interpréter la saisie de l'utilisateur, remplir les paramètres requis et passer l'appel API approprié. + +Certainement! Voici quelques ** Exemples d'interactions ** pour votre flux, y compris les exemples de requêtes utilisateur et le comportement attendu pour chacun, mappé à l'outil correspondant (obtenir, publier, mettre, supprimer): + +### Exemples d'interactions + +#### 1. Récupérer les événements (obtenir) + +** Exemple de requête: ** + +> "Montrez-moi tous les événements à venir." + +** Comportement attendu: ** + +* L'agent sélectionne l'outil ** get **. +* Il envoie une demande de GET à`http://localhost:5566/events`. +* L'agent renvoie une liste de tous les événements à l'utilisateur. + +*** + +** Exemple de requête: ** + +> "Obtenez les détails de l'événement avec ID 12345." + +** Comportement attendu: ** + +* L'agent sélectionne l'outil ** get **. +* Il envoie une demande de GET à`http://localhost:5566/events/12345`. +* L'agent renvoie les détails de l'événement avec ID`12345`. + +*** + +#### 2. 
Créez un nouvel événement (post) + +** Exemple de requête: ** + +> "Créer un nouvel événement appelé 'AI Conference' le 2024-07-15 au Tech Hall." + +** Comportement attendu: ** + +* L'agent sélectionne l'outil ** Post **. +* Il envoie une demande de poste à`http://localhost:5566/events`avec le corps: + + ```json + { + "name": "AI Conference", + "date": "2024-07-15", + "location": "Tech Hall" + } + ``` +* L'agent confirme que l'événement a été créé et pourrait retourner les détails du nouvel événement. + +*** + +#### 3. Mettre à jour un événement (put) + +** Exemple de requête: ** + +> "Changez l'emplacement de l'événement« Conférence AI »le 2024-07-15 en« Auditorium principal ».» + +** Comportement attendu: ** + +* L'agent sélectionne l'outil ** put **. +* Il envoie une demande de vente à`http://localhost:5566/events`avec les détails de l'événement mis à jour: + + ```json + { + "name": "AI Conference", + "date": "2024-07-15", + "location": "Main Auditorium" + } + ``` +* L'agent confirme que l'événement a été mis à jour. + +*** + +#### 4. Supprimer un événement (supprimer) + +** Exemple de requête: ** + +> "Supprimer l'événement avec ID 12345." + +** Comportement attendu: ** + +* L'agent sélectionne l'outil ** supprimer **. +* Il envoie une demande de suppression à`http://localhost:5566/events/12345`. +* L'agent confirme que l'événement a été supprimé. + +### Débit complet + +{% fichier src = "../. gitbook / actifs / demandes d'outils agent.json"%} + +*** + +## Boîte à outils OpenAPI + +Les 4 outils de demande fonctionnent très bien si vous avez quelques API, mais imaginez avoir des dizaines ou des centaines d'API, cela pourrait devenir difficile à maintenir. Pour résoudre ce problème, Flowise fournit une boîte à outils OpenAPI qui est capable de prendre un fichier OpenAPI YAML et d'analyser chaque API dans un outil. 
Le[OpenAPI Specification (OAS)](https://swagger.io/specification/)est une norme universellement acceptée pour décrire les détails des API RESTfuls dans un format que les machines peuvent lire et interpréter. + +En utilisant l'API de gestion d'événements, nous pouvons générer un fichier OpenAPI YAML: + +```yaml +openapi: 3.0.0 +info: + version: 1.0.0 + title: Event Management API + description: An API for managing event data + +servers: + - url: http://localhost:5566 + description: Local development server + +paths: + /events: + get: + summary: List all events + operationId: listEvents + responses: + '200': + description: A list of events + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Event' + + post: + summary: Create a new event + operationId: createEvent + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EventInput' + responses: + '201': + description: The event was created + content: + application/json: + schema: + $ref: '#/components/schemas/Event' + '400': + description: Invalid input + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + /events/{id}: + parameters: + - name: id + in: path + required: true + schema: + type: string + description: The event ID + + get: + summary: Retrieve an event by ID + operationId: getEventById + responses: + '200': + description: The event + content: + application/json: + schema: + $ref: '#/components/schemas/Event' + '404': + description: Event not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + patch: + summary: Update an event's details by ID + operationId: updateEventDetails + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EventInput' + responses: + '200': + description: The event's details were updated + content: + application/json: + schema: + $ref: '#/components/schemas/Event' + '400': + description: 
Invalid input + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '404': + description: Event not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + delete: + summary: Delete an event by ID + operationId: deleteEvent + responses: + '204': + description: The event was deleted + '404': + description: Event not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + +components: + schemas: + Event: + type: object + properties: + id: + type: string + description: The unique identifier for the event + name: + type: string + description: The name of the event + date: + type: string + format: date-time + description: The date and time of the event in ISO 8601 format + location: + type: string + description: The location of the event + required: + - name + - date + - location + + EventInput: + type: object + properties: + name: + type: string + description: The name of the event + date: + type: string + format: date-time + description: The date and time of the event in ISO 8601 format + location: + type: string + description: The location of the event + required: + - name + - date + - location + + Error: + type: object + properties: + error: + type: string + description: Error message +``` + +### Étape 1: Ajoutez le nœud de démarrage + +
+ +### Étape 2: Ajouter le nœud d'agent + +Ensuite, ajoutez un nœud d'agent. Dans cette configuration, l'agent est configuré avec un seul outil - OpenAPI Toolkit + +#### Outil: boîte à outils OpenAPI + +
+ +* ** Objectif: ** Récupérez la liste des API du fichier YAML et transformez toutes les API en liste des outils +* ** Entrées de configuration: ** + * ** Fichier YAML: ** Le fichier OpenAPI YAML + * ** RETOUR DIRECT: ** Si vous devez retourner la réponse de l'API directement + * ** En-têtes: ** (Facultatif) Ajouter tous les en-têtes HTTP requis. + * ** Supprimer les paramètres nuls: ** Supprimer toutes les clés avec des valeurs nulles des arguments analysés + * ** Code personnalisé **: Personnalisez la façon dont la réponse est retournée + +### Exemples d'interactions: + +Nous pouvons utiliser le même exemple de requêtes de l'exemple précédent pour le tester: + +
+ +*** + +## Appeler API séquentiellement + +À partir des exemples ci-dessus, nous avons vu comment l'agent peut appeler dynamiquement les outils et interagir avec les API. Dans certains cas, il peut être nécessaire d'appeler une API séquentiellement avant ou après certaines actions. Par exemple, vous pouvez récupérer une liste de clients à partir d'un CRM et la transmettre à un agent. Dans de tels cas, vous pouvez utiliser le[HTTP node](../using-flowise/agentflowv2.md#id-6.-http-node). + + + +## Meilleures pratiques + +* L'interaction avec les API est généralement utilisée lorsque vous souhaitez qu'un agent récupére les informations les plus à jour. Par exemple, un agent peut récupérer la disponibilité de votre calendrier, l'état du projet ou d'autres données en temps réel. +* Il est souvent utile d'inclure explicitement l'heure actuelle dans l'invite du système. Flowise fournit une variable appelée`{{current_date_time}}`, qui récupère la date et l'heure actuelles. Cela permet au LLM d'être conscient du moment présent, donc si vous posez des questions sur votre disponibilité pour aujourd'hui, le modèle peut faire référence à la date correcte. Sinon, il peut s'appuyer sur sa dernière date de coupure de formation, qui retournerait des informations obsolètes. Par exemple: + +``` +You are helpful assistant. + +Todays date time is {{current_date_time }} +``` diff --git a/fr/tutorials/rag.md b/fr/tutorials/rag.md new file mode 100644 index 00000000..f8f31ef1 --- /dev/null +++ b/fr/tutorials/rag.md @@ -0,0 +1,50 @@ +# CHIFFON + +Les modèles de grandes langues (LLMS) ont débloqué le potentiel de création de chatbots Q \ & A avancés capables de fournir des réponses précises en fonction du contenu spécifique. Ces systèmes reposent sur une méthode appelée génération (RAG) (RAG) de la récupération, ce qui améliore leurs réponses en les ancrant dans le matériel source pertinent. 
+ +Dans ce tutoriel, vous apprendrez à créer une application Q\&A de base qui peut extraire et répondre aux questions à partir de sources de documents données. + +Le processus peut être séparé en 2 sous-processus: + +* Indexation +* Récupération + +## Indexation + +[Document Stores](../using-flowise/document-stores.md)est conçu pour faciliter l'ensemble du pipeline d'indexation - récupération des données à partir de différentes sources, stratégie de découpage (chunking), upsert vers la base de données vectorielle, synchronisation avec les données mises à jour. + +Nous prenons en charge un large éventail de chargeurs de documents, allant de fichiers comme PDF, Word, Google Drive, vers des scrapers Web comme Playwright, Firecrawl, Apify et autres. Vous pouvez également créer un chargeur de documents personnalisé. + +
+ +## Récupération + +En fonction de l'entrée de l'utilisateur, les morceaux de document pertinents sont récupérés à partir de la base de données vectorielle. LLM utilise ensuite le contexte récupéré pour générer une réponse. + +1. Faites glisser et déposez un[Agent](../using-flowise/agentflowv2.md#id-3.-agent-node)Node et configurez le modèle à utiliser. + +
+ +2. Ajoutez une nouvelle connaissance (magasin de documents) et définissez ce qu'est le contenu. Cela aide le LLM à comprendre quand et comment récupérer les informations pertinentes. Vous pouvez également utiliser le bouton de génération automatique pour aider à ce processus. + +{% Hint Style = "Success"%} +Seul le magasin de documents renversé peut être utilisé +{% EndHint%} + +
+ +3. (Facultatif) Si les données ont déjà été stockées dans une base de données vectorielle sans passer par le pipeline d'indexation du magasin de documents, vous pouvez également vous connecter directement à la base de données vectorielle et au modèle d'intégration. + +
+ +4. Ajoutez une invite système ou utilisez le bouton ** générer ** pour aider. Nous vous recommandons de l'utiliser, car cela aide à élaborer une invite plus efficace et optimisée. + +
+ +
+ +5. Votre agent de chiffon est maintenant prêt à l'emploi! + +## Ressources + +{% embed url = "https://youtu.be/khc0cloiv0a?si=mezjydm8bt2imkjy"%} diff --git a/fr/tutorials/sql-agent.md b/fr/tutorials/sql-agent.md new file mode 100644 index 00000000..c1ad126f --- /dev/null +++ b/fr/tutorials/sql-agent.md @@ -0,0 +1,366 @@ +# Agent SQL + +Ce didacticiel vous guidera à travers la création d'un agent SQL intelligent qui peut interagir avec les bases de données, générer des requêtes SQL, les valider, les exécuter et s'auto-correction lorsque des erreurs se produisent. + +## Aperçu + +Le flux d'agent SQL implémente un système d'interaction de base de données robuste qui: + +1. Récupération des informations sur le schéma de la base de données +2. Génère des requêtes SQL en fonction des questions des utilisateurs +3. Valide les requêtes générées pour les erreurs courantes +4. Exécute des requêtes contre la base de données +5. Vérifie les résultats des erreurs et des auto-corrects en cas de besoin +6. Fournit des réponses en langage naturel en fonction des résultats de la requête + +
+ +### Étape 1: Configuration du nœud de démarrage + +Commencez par ajouter un nœud ** start ** à votre toile. Cela sert de point d'entrée pour votre agent SQL. + +
+ +#### Configuration: + +* ** Type d'entrée **: Sélectionnez "CHAT ENTRE" pour accepter les questions des utilisateurs +* ** État de flux **: Ajoutez une variable d'état avec la clé "`sqlQuery`"Et une valeur vide + +Le nœud de démarrage initialise l'état de débit avec un vide`sqlQuery`variable qui stockera la requête SQL générée tout au long du processus. + +### Étape 2: Récupération du schéma de base de données + +Ajoutez un nœud de fonction ** personnalisé ** et connectez-le au nœud de démarrage. + +
+ +#### Configuration: + +* ** Fonction JavaScript **: Il s'agit d'un exemple de fonction qui se connecte à votre base de données et récupère le schéma complet, y compris les structures de table, les définitions de colonnes et les données d'échantillons. + +```javascript +const { DataSource } = require('typeorm'); + +const HOST = 'localhost'; +const USER = 'testuser'; +const PASSWORD = 'testpwd'; +const DATABASE = 'testdatabase'; +const PORT = 5432; + +let sqlSchemaPrompt = ''; + +const AppDataSource = new DataSource({ + type: 'postgres', + host: HOST, + port: PORT, + username: USER, + password: PASSWORD, + database: DATABASE, + synchronize: false, + logging: false, +}); + +async function getSQLPrompt() { + try { + await AppDataSource.initialize(); + const queryRunner = AppDataSource.createQueryRunner(); + + // Get all user-defined tables + const tablesResult = await queryRunner.query(` + SELECT table_name + FROM information_schema.tables + WHERE table_schema = 'public' AND table_type = 'BASE TABLE' + `); + + for (const tableRow of tablesResult) { + const tableName = tableRow.table_name; + const schemaInfo = await queryRunner.query(` + SELECT column_name, data_type, is_nullable + FROM information_schema.columns + WHERE table_name = '${tableName}' + `); + + const createColumns = []; + const columnNames = []; + + for (const column of schemaInfo) { + const name = column.column_name; + const type = column.data_type.toUpperCase(); + const notNull = column.is_nullable === 'NO' ? 
'NOT NULL' : ''; + columnNames.push(name); + createColumns.push(`${name} ${type} ${notNull}`); + } + + const sqlCreateTableQuery = `CREATE TABLE ${tableName} (${createColumns.join(', ')})`; + const sqlSelectTableQuery = `SELECT * FROM ${tableName} LIMIT 3`; + + let allValues = []; + try { + const rows = await queryRunner.query(sqlSelectTableQuery); + allValues = rows.map(row => + columnNames.map(col => row[col]).join(' ') + ); + } catch (err) { + allValues.push('[ERROR FETCHING ROWS]'); + } + + sqlSchemaPrompt += + sqlCreateTableQuery + '\n' + + sqlSelectTableQuery + '\n' + + columnNames.join(' ') + '\n' + + allValues.join('\n') + '\n\n'; + } + + await queryRunner.release(); + } catch (err) { + console.error(err); + throw err; + } +} + +await getSQLPrompt(); +return sqlSchemaPrompt; +``` + +### Étape 3: Génération de requêtes SQL + +Ajoutez un nœud ** llm ** connecté au nœud "get db schéma". + +
+ +#### Configuration: + +* ** Messages **: Ajouter un message système: + +``` +You are an agent designed to interact with a SQL database. Given an input question, create a syntactically correct sqlite query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 5 results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for the relevant columns given the question. DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database. + +Here is the relevant table info: +{{ customFunctionAgentflow_0 }} + +Note: +- Only generate ONE SQL query +``` + +* ** Sortie structurée JSON **: Ici, nous instructons le modèle uniquement la sortie structurée, pour empêcher LLM d'inclure d'autres texte autres que la requête SQL. + * Clé: "`sql_query`" + * Type: "String" + * Description: "Query SQL" +* ** Mettre à jour l'état de flux **: Set Key "`sqlQuery`"Avec valeur`{{ output.sql_query }}` + +Ce nœud transforme la question du langage naturel de l'utilisateur en une requête SQL structurée à l'aide des informations de schéma de base de données. + +### Étape 4: validation de la syntaxe de requête SQL + +Ajoutez un nœud d'agent ** de condition ** connecté à la "Generate SQL Query" LLM. + +
+ +#### Configuration: + +* **Instructions**: + +``` +You are a SQL expert with a strong attention to detail. Double check the SQL query for common mistakes, including: +- Using NOT IN with NULL values +- Using UNION when UNION ALL should have been used +- Using BETWEEN for exclusive ranges +- Data type mismatch in predicates +- Properly quoting identifiers +- Using the correct number of arguments for functions +- Casting to the correct data type +- Using the proper columns for joins +``` + +* **Saisir**:`{{ $flow.state.sqlQuery }}` +* ** Scénarios **: + * Scénario 1: "La requête SQL est correcte et ne contient pas d'erreurs" + * Scénario 2: "La requête SQL contient des erreurs" + +Cette étape de validation attrape les erreurs SQL courantes avant l'exécution. + +### Étape 5: Gestion de la régénération des requêtes (chemin d'erreur) + +Pour les requêtes incorrectes (sortie 1) du nœud de l'agent de condition précédente, ajoutez un nœud ** boucle **. + +
+ +#### Configuration: + +
+ +* ** Loop Retour à **: "Générer la requête SQL" +* ** Count de boucle maximale **: réglé sur 5 + +Cela crée une boucle de rétroaction qui permet au système de réessayer la génération de requête lorsque la validation échoue. + +### Étape 6: Exécution des requêtes SQL valides + +Pour les requêtes correctes (sortie 0), ajoutez un nœud de fonction ** personnalisé **. + +
+ +* ** Variables d'entrée **: Ici, nous passons dans la requête SQL générée comme variable à utiliser dans la fonction. + * Nom de la variable: "SqlQuery" + * Valeur variable:`{{ $flow.state.sqlQuery }}` +* ** Fonction JavaScript **: Cette fonction exécute la requête SQL validée par rapport à la base de données et formate les résultats. + +```javascript +const { DataSource } = require('typeorm'); + +const HOST = 'localhost'; +const USER = 'testuser'; +const PASSWORD = 'testpwd'; +const DATABASE = 'testdatabase'; +const PORT = 5432; + +const sqlQuery = $sqlQuery; + +const AppDataSource = new DataSource({ + type: 'postgres', + host: HOST, + port: PORT, + username: USER, + password: PASSWORD, + database: DATABASE, + synchronize: false, + logging: false, +}); + +let formattedResult = ''; + +async function runSQLQuery(query) { + try { + await AppDataSource.initialize(); + const queryRunner = AppDataSource.createQueryRunner(); + + const rows = await queryRunner.query(query); + console.log('rows =', rows); + + if (rows.length === 0) { + formattedResult = '[No results returned]'; + } else { + const columnNames = Object.keys(rows[0]); + const header = columnNames.join(' '); + const values = rows.map(row => + columnNames.map(col => row[col]).join(' ') + ); + + formattedResult = query + '\n' + header + '\n' + values.join('\n'); + } + + await queryRunner.release(); + } catch (err) { + console.error('[ERROR]', err); + formattedResult = `[Error executing query]: ${err}`; + } + + return formattedResult; +} + +await runSQLQuery(sqlQuery); +return formattedResult; +``` + +### Étape 7: Vérification des résultats de l'exécution des requêtes + +Ajoutez un nœud d'agent ** de condition ** connecté à la fonction "Run SQL Query". + +
+ +#### Configuration: + +* ** Instructions **: "Vous êtes un expert SQL. Vérifiez si le résultat de la requête est correct ou contient une erreur." +* **Saisir**:`{{ customFunctionAgentflow_1 }}` +* ** Scénarios **: + * Scénario 1: "Le résultat est correct et ne contient pas d'erreur" + * Scénario 2: "La requête du résultat contient une erreur" + +Cette étape valide les résultats de l'exécution et détermine si une correction supplémentaire est nécessaire. + +### Étape 8: Génération de réponse finale (chemin de réussite) + +Pour des résultats réussis (sortie 0 de Condition Agent), ajoutez un nœud ** llm **. + +
+ +#### Configuration: + +* ** Message d'entrée **:`{{ customFunctionAgentflow_1 }}` + +Ce nœud génère une réponse en langage naturel basé sur les résultats de la requête réussis. + +### Étape 9: Gestion de la régénération des requêtes (chemin d'erreur d'exécution) + +Pour les exécutions défaillantes (sortie 1 de l'agent de condition), ajoutez un nœud ** llm **. + +
+ +#### Configuration: + +
+ +* ** Messages **: Ajoutez le même message système que l'étape 3 +* ** Message d'entrée **: + +``` +Given the generated SQL Query: {{ $flow.state.sqlQuery }} +I have the following error: {{ customFunctionAgentflow_1 }} +Regenerate a new SQL Query that will fix the error +``` + +* ** Sortie structurée JSON **: Identique à l'étape 3 +* ** Mettre à jour l'état de flux **: Set Key "`sqlQuery`"Avec valeur`{{ output.sql_query }}` + +Ce nœud analyse les erreurs d'exécution et génère des requêtes SQL corrigées. + +### Étape 10: ajout de la deuxième boucle + +Ajoutez un nœud ** LOOP ** connecté au "Regenerate SQL Query" LLM. + +
+ +#### Configuration: + +* ** Boucle de retour à **: "Vérifiez la requête SQL" +* ** Count de boucle maximale **: réglé sur 5 + +Cela crée une deuxième boucle de rétroaction pour la correction d'erreur d'exécution. + +*** + +## Structure d'écoulement complète + +{% fichier src = "../. gitbook / actifs / sql agent.json"%} + +*** + +## Résumé + +1. Démarrer → Obtenir le schéma DB +2. Obtenez un schéma DB → Générer une requête SQL +3. Générer la requête SQL → Vérifiez la requête SQL +4. Vérifiez la requête SQL (correcte) → Exécuter la requête SQL +5. Vérifiez la requête SQL (incorrecte) → Regérer la requête (boucle arrière) +6. Exécuter la requête SQL → Vérifier le résultat +7. Vérifier le résultat (succès) → Retour Response +8. Vérifier le résultat (erreur) → Regérer la requête SQL +9. Regenerate SQL Query → ReCheck SQL Query (boucle arrière) + +*** + +## Tester votre agent SQL + +Testez votre agent avec différents types de questions de base de données: + +* Requêtes simples: "Montrez-moi tous les clients" +* Requêtes complexes: "Quels sont les 5 meilleurs produits par vente?" +* Requêtes analytiques: "Calculez la valeur moyenne de l'ordre par mois" + +
+ +Ce flux d'agent SQL fournit un système robuste et auto-corrigé pour les interactions de base de données qui peuvent gérer les requêtes SQL en langage naturel. diff --git a/fr/tutorials/structured-output.md b/fr/tutorials/structured-output.md new file mode 100644 index 00000000..b5c6ff6f --- /dev/null +++ b/fr/tutorials/structured-output.md @@ -0,0 +1,181 @@ +# Sortie structurée + +Dans de nombreux cas d'utilisation, tels que les chatbots, les modèles devraient répondre aux utilisateurs en langage naturel. Cependant, il existe des situations où les réponses du langage naturel ne sont pas idéales. Par exemple, si nous devons prendre la sortie du modèle, le passer en tant que corps pour la demande HTTP ou stocker dans une base de données, il est essentiel que la sortie s'aligne sur un schéma prédéfini. Cette exigence donne naissance au concept de ** sortie structurée **, où les modèles sont guidés pour générer des réponses dans un format structuré spécifique. + +Dans ce tutoriel, nous allons jeter un œil à la façon de générer une sortie structurée à partir de LLM et de la passer comme le corps pour la demande HTTP. + +## Condition préalable + +Nous allons utiliser le même[Event Management Server](interacting-with-api.md#prerequisite)pour la demande HTTP. + +Absolument! Voici un tutoriel pour votre ** flux de sortie structuré ** dans un format cohérent avec votre documentation "Agent As Tool", y compris les explications étape par étape et les espaces réservés d'image. + +*** + +## Aperçu + +1. Reçoit l'entrée de l'utilisateur via un nœud de démarrage. +2. Utilise un LLM pour générer un tableau JSON structuré. +3. Boucle via chaque élément du tableau. +4. Envoie chaque élément via HTTP à un point de terminaison externe. + +
+ +### Étape 1: Configuration du nœud de démarrage + +Commencez par ajouter un nœud ** start ** à votre toile. + +
+ +** Paramètres d'entrée de clé: ** + +* ** Type d'entrée: ** + * `chatInput`(par défaut): le flux commence par un message de chat de l'utilisateur. + * `formInput`: Le flux commence par un formulaire (si vous souhaitez collecter des données structurées de l'utilisateur). +* ** Mémoire éphémère: ** + * (Facultatif) Si activé, le flux ne conserve pas l'historique de chat entre les exécutions. +* ** État de flux: ** + * (Facultatif) Pré-pupuler les variables d'état. + * Exemple: + + ```json + [ + { "key": "answers", "value": "" } + ] + ``` +* ** État persiste: ** + * (Facultatif) Si activé, l'état est persisté sur la même session. + +### Étape 2: Génération de sortie structurée avec LLM + +Ajoutez un nœud LLM et connectez-le au nœud de démarrage. + +
+ +** Objectif: ** utilise un modèle de langue pour analyser l'entrée et générer un tableau JSON structuré. + +** Paramètres d'entrée de clé: ** + +* ** Sortie structurée JSON: ** + * **Clé:**`answers` + * **Taper:**`JSON Array` + * ** schéma JSON: ** + + ```json + { + "name": { "type": "string", "required": true, "description": "Name of the event" }, + "date": { "type": "string", "required": true, "description": "Date of the event" }, + "location": { "type": "string", "required": true, "description": "Location of the event" } + } + ``` + * ** Description: ** "Réponse à la requête utilisateur" +* ** Mettez à jour l'état du flux: ** + * Met à jour l'état de flux avec la sortie JSON générée. + * Exemple: + + ```json + [ + { + "key": "answers", + "value": "{{ output.answers }}" + } + ] + ``` + +### Étape 3: faire une boucle dans le tableau JSON + +Ajoutez un nœud d'itération et connectez-le à la sortie du nœud LLM. + +
+ +** Objectif: ** itère sur chaque élément du tableau JSON généré à partir du nœud LLM. + +** Paramètres d'entrée de clé: ** + +* ** Entrée du tableau: ** + + * Le tableau pour itérer. Réglé sur les réponses de l'état enregistré: + + ```html + {{ $flow.state.answers }} + ``` + + * Cela signifie que le nœud traversera chaque événement dans le tableau des réponses. + +### Étape 4: Envoi de chaque élément via HTTP + +À l'intérieur de la boucle, ajoutez un nœud ** http **. + +
+ +** Objectif: ** Pour chaque élément du tableau, envoie une demande de message HTTP à un point de terminaison spécifié (par exemple,`http://localhost:5566/events`). + +** Paramètres d'entrée de clé: ** + +* **Méthode:** + * `POST`(par défaut pour ce cas d'utilisation). +* ** URL: ** + * Le point de terminaison pour envoyer des données. + * Exemple: + + ``` + http://localhost:5566/events + ``` +* ** Headers: ** + * (Facultatif) Ajouter tous les en-têtes HTTP requis (par exemple, pour l'authentification). +* ** Paramètres de requête: ** + * (Facultatif) Ajouter tous les paramètres de requête si nécessaire. +* ** Type de corps: ** + * `json`(par défaut): envoie le corps en JSON. +* **Corps:** + * Les données à envoyer dans le corps de la demande. + * Définir sur l'élément actuel dans la boucle: + + ```html + {{ $iteration }} + ``` +* ** Type de réponse: ** + * `json`(par défaut): attend une réponse JSON. + +*** + +## Exemples d'interactions + +** Entrée utilisateur: ** + +``` +create 2 events: +1. JS Conference on next Sat in Netherlands +2. GenAI meetup, Sept 19, in Dublin +``` + +**Couler:** + +* Le nœud de démarrage reçoit l'entrée. +* Le nœud LLM génère un tableau JSON d'événements. +* Le nœud de boucle itère dans chaque événement. +* Le nœud http créez chaque événement via l'API. + +
+ +
+ +*** + +## Structure d'écoulement complète + +{% fichier src = "../. GitBook / Assets / Structured Output.json"%} + +*** + +## Meilleures pratiques + +** Directives de conception: ** + +1. ** Schéma de sortie effacer: ** Définissez la structure attendue de la sortie LLM pour assurer un traitement fiable en aval. + +** Cas d'utilisation courants: ** + +* ** Traitement des événements: ** Collectez et envoyez des données d'événements à un calendrier ou un système de gestion d'événements. +* ** Entrée de données en masse: ** Générer et soumettre plusieurs enregistrements à une base de données ou à l'API. +* ** Notifications automatisées: ** Envoyer des messages ou des alertes personnalisés pour chaque élément d'une liste. diff --git a/fr/tutorials/supervisor-and-workers.md b/fr/tutorials/supervisor-and-workers.md new file mode 100644 index 00000000..780e201b --- /dev/null +++ b/fr/tutorials/supervisor-and-workers.md @@ -0,0 +1,205 @@ +# Superviseur et travailleurs + +Le modèle de travailleur superviseur est une conception puissante de workflow où un agent superviseur coordonne plusieurs agents de travailleurs spécialisés pour effectuer des tâches complexes. Ce modèle permet une meilleure délégation des tâches, une expertise spécialisée et un raffinement itératif des solutions. + +## Aperçu + +Dans ce tutoriel, nous allons créer un système collaboratif avec: + +* ** Superviseur **: un LLM qui analyse les tâches et décide quel travailleur doit agir ensuite +* ** Ingénieur logiciel **: Spécialisé dans la conception et la mise en œuvre de solutions logicielles +* ** Code Reviewer **: axé sur la révision de la qualité du code et la fourniture de commentaires +* ** Générateur de réponses finales **: compile le travail collaboratif dans une solution complète + +
+ +### Étape 1: Créez le nœud de démarrage + +
+ +Le flux commence par un nœud ** start ** qui capture l'entrée utilisateur et initialise l'état de flux de travail. + +1. Ajoutez un nœud ** start ** à votre toile +2. Configurez le ** type d'entrée ** comme "entrée de chat" +3. Configurer ** État de flux ** avec ces variables initiales: + * `next`: Pour garder une trace du prochain agent + * `instruction`: Instruction pour le prochain agent sur ce qu'il faut faire + +
+ +### Étape 2: Ajouter le superviseur LLM + +
+ +Le ** superviseur ** est l'orchestrateur qui décide quel travailleur doit gérer chaque partie de la tâche. + +1. Connectez un nœud ** llm ** après le nœud de démarrage +2. Étiquetez-le "superviseur" +3. Configurez le message système, par exemple: + +``` +You are a supervisor tasked with managing a conversation between the following workers: +- Software Engineer +- Code Reviewer + +Given the following user request, respond with the worker to act next. +Each worker will perform a task and respond with their results and status. +When finished, respond with FINISH. +Select strategically to minimize the number of steps taken. +``` + +4. Configurer ** Sortie structurée JSON ** avec ces champs: + * `next`: Enum avec les valeurs "Finition, logiciel, critique" + * `instructions`: Les instructions spécifiques du sous-tâche que le prochain travailleur devrait accomplir + * `reasoning`: La raison pour laquelle le prochain travailleur est chargé de faire le travail +5. Configurer ** Mettre à jour l'état de flux ** pour stocker: + * `next`: `{{ output.next }}` + * `instruction`: `{{ output.instructions }}` +6. Définissez le ** Message d'entrée ** sur: _ "Compte tenu de la conversation ci-dessus, qui devrait agir ensuite? Ou devrions-nous terminer? Sélectionnez un de: logiciel, examinateur." _ Le message d'entrée sera inséré à la fin, comme si l'utilisateur demandait au superviseur d'attribuer le prochain agent. + +
+ +### Étape 3: Créez la condition de routage + +
+ +Le ** Vérifiez le nœud de condition du travailleur Next ** achemine le flux en fonction de la décision du superviseur. + +1. Ajouter un nœud ** condition ** après le superviseur +2. Configurez deux conditions: + * ** Condition 0 **:`{{ $flow.state.next }}`est égal à "logiciel" + * ** Condition 1 **:`{{ $flow.state.next }}`est égal à "réviseur" +3. La branche "Else" (condition 2) gérera le boîtier "Finish" + +Cela crée trois chemins de sortie: un pour chaque travailleur et un pour l'achèvement. + +
+ +### Étape 4: Configurer l'agent d'ingénieur logiciel + +
+ +
+ +### Étape 6: Ajoutez des connexions de retour en boucle + +
+ +Les deux agents des travailleurs doivent retourner au superviseur pour une coordination continue. + +1. Ajouter un nœud ** boucle ** après l'ingénieur logiciel + * Définissez ** la boucle à ** en tant que "superviseur" + * Set ** MAX LOOP COUNT ** à 5 +2. Ajoutez un autre nœud ** LOOP ** après le réviseur de code + * Définissez ** la boucle à ** en tant que "superviseur" + * Set ** MAX LOOP COUNT ** à 5 + +Ces boucles permettent une collaboration itérative entre les agents. + +### Étape 7: Créez le générateur de réponses final + +
+ +## Structure d'écoulement complète + +{% fichier src = "../. GitBook / Assets / Superviseur Agents Worker.json"%} + +## Meilleures pratiques + +* Cette architecture consomme beaucoup de jetons en raison des communications de va-et-vient entre les agents, il ne convient donc pas à tous les cas. Il est particulièrement efficace pour: + * Tâches de développement de logiciels nécessitant à la fois la mise en œuvre et l'examen + * Résolution de problèmes complexes qui profite de plusieurs perspectives + * Les flux de travail où la qualité et l'itération sont importantes + * Tâches qui nécessitent une coordination entre les différents types d'expertise +* Assurez-vous que chaque agent a un rôle spécifique bien défini. Évitez les responsabilités qui se chevauchent qui pourraient entraîner une confusion ou un travail redondant. +* Établir des formats standard pour la façon dont les agents communiquent leurs progrès, leurs résultats et leurs recommandations. Cela aide le superviseur à prendre de meilleures décisions de routage. +* Utilisez les paramètres de mémoire de manière appropriée pour maintenir le contexte de la conversation tout en évitant les problèmes de limite de jetons. Envisagez d'utiliser des paramètres d'optimisation de la mémoire comme le "tampon de résumé de conversation" pour les workflows plus longs. + +## Tutoriel vidéo + +{% embed url = "https://youtu.be/tbzaj5szcbm?si=e4nxn__hhzjbnwdf"%} diff --git a/fr/tutorials/tools-and-mcp.md b/fr/tutorials/tools-and-mcp.md new file mode 100644 index 00000000..7c02fc99 --- /dev/null +++ b/fr/tutorials/tools-and-mcp.md @@ -0,0 +1,266 @@ +# Outils et MCP + +Dans le précédent[**Interacting with API**](interacting-with-api.md)Tutoriel, nous avons exploré comment permettre aux LLM d'appeler des API externes. Pour améliorer l'expérience utilisateur, Flowise fournit une liste d'outils préfabillés. Reportez-vous au[**Tools**](../integrations/langchain/tools/)Section pour la liste complète des intégrations disponibles. 
+ +Dans les cas où l'outil dont vous avez besoin n'est pas encore disponible, vous pouvez créer un ** outil personnalisé ** pour répondre à vos besoins. + +## Outil personnalisé + +Nous allons utiliser le même[Event Management Server](interacting-with-api.md#prerequisite), et créez un outil personnalisé qui peut appeler la demande HTTP Post pour`/events`. + +
+ +* ** Nom de l'outil: **`create_event` +* ** Description de l'outil: **`Use this when you want to create a new event.` +* ** Schéma d'entrée: ** Un schéma JSON du corps de la demande d'API qui permet à LLM de savoir comment générer automatiquement le corps JSON correct. Par exemple: +* ** Fonction JavaScript **: la fonction réelle à exécuter une fois cet outil appelé + +```javascript +const fetch = require('node-fetch'); +const url = 'http://localhost:5566/events'; +const options = { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + name: $name, + location: $location, + date: $date + }) +}; +try { + const response = await fetch(url, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +} +``` + +### Comment utiliser la fonction: + +* Vous pouvez utiliser toutes les bibliothèques importées dans Flowise. +* Vous pouvez utiliser des propriétés spécifiées dans le schéma d'entrée comme variables avec préfixe`# Outils et MCP + +Dans le précédent[**Interacting with API**](interacting-with-api.md)Tutoriel, nous avons exploré comment permettre aux LLM d'appeler des API externes. Pour améliorer l'expérience utilisateur, Flowise fournit une liste d'outils préfabillés. Reportez-vous au[**Tools**](../integrations/langchain/tools/)Section pour la liste complète des intégrations disponibles. + +Dans les cas où l'outil dont vous avez besoin n'est pas encore disponible, vous pouvez créer un ** outil personnalisé ** pour répondre à vos besoins. + +## Outil personnalisé + +Nous allons utiliser le même[Event Management Server](interacting-with-api.md#prerequisite), et créez un outil personnalisé qui peut appeler la demande HTTP Post pour`/events`. + +
+ +* ** Nom de l'outil: **`create_event` +* ** Description de l'outil: **`Use this when you want to create a new event.` +* ** Schéma d'entrée: ** Un schéma JSON du corps de la demande d'API qui permet à LLM de savoir comment générer automatiquement le corps JSON correct. Par exemple: +* ** Fonction JavaScript **: la fonction réelle à exécuter une fois cet outil appelé + +```javascript +const fetch = require('node-fetch'); +const url = 'http://localhost:5566/events'; +const options = { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + name: $name, + location: $location, + date: $date + }) +}; +try { + const response = await fetch(url, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +} +``` + +### Comment utiliser la fonction: + +* Vous pouvez utiliser toutes les bibliothèques importées dans Flowise. +* Vous pouvez utiliser des propriétés spécifiées dans le schéma d'entrée comme variables avec préfixe: + * Propriété du schéma d'entrée =`name` + * Variable à utiliser dans la fonction =`$name` +* Vous pouvez obtenir une configuration de flux par défaut: + * `$flow.sessionId` + * `$flow.chatId` + * `$flow.chatflowId` + * `$flow.input` + * `$flow.state` +* Vous pouvez obtenir des variables personnalisées:`$vars.` +* Doit renvoyer une valeur de chaîne à la fin de la fonction + +### Utilisez un outil personnalisé sur l'agent + +Une fois l'outil personnalisé créé, vous pouvez l'utiliser sur le nœud d'agent. + +
+ +Dans la liste déroulante de l'outil, sélectionnez l'outil personnalisé. Vous pouvez également activer ** return direc ** t si vous souhaitez renvoyer directement la sortie de l'outil personnalisé. + +
+ +### Utilisez l'outil personnalisé sur l'outil + +Il peut également être utilisé comme nœud d'outil dans un scénario de flux de travail déterminé. \ +Dans ce cas, ** Les arguments d'entrée de l'outil doivent être explicitement définis et remplis de valeurs **, car il n'y a pas de LLM pour déterminer automatiquement les valeurs. + +
+ +## MCP + +MCP ([Model Context Protocol](https://modelcontextprotocol.io/introduction)) fournit un moyen standardisé de connecter les modèles d'IA à différentes sources de données et outils. En d'autres termes, au lieu de compter sur des outils Flowise intégrés ou de créer un outil personnalisé, on peut utiliser des serveurs MCP qui ont été créés par d'autres. MCP est largement considéré comme une norme de l'industrie et est généralement soutenu et maintenu par les prestataires officiels. Par exemple, le GitHub MCP est développé et maintenu par l'équipe GitHub, avec un soutien similaire fourni pour Atlassian Jira, Brave Search, et autres. Vous pouvez trouver la liste des serveurs pris en charge[here](https://modelcontextprotocol.io/examples). + +
+ +## MCP personnalisé + +Outre les outils MCP prédéfinis, la fonctionnalité la plus puissante est ** personnalisé MCP **, qui permet aux utilisateurs de se connecter à tout serveur MCP de leur choix. + +MCP suit une architecture client-serveur où: + +* ** hôtes ** sont des applications LLM (comme Flowise) qui initient des connexions +* ** Clients ** Maintenir des connexions 1: 1 avec des serveurs, à l'intérieur de l'application hôte (comme MCP personnalisé) +* ** Serveurs ** Fournir un contexte, des outils et des invites aux clients (exemple[servers](https://modelcontextprotocol.io/examples)) + +Pour gérer la communication réelle entre les clients et les serveurs. MCP prend en charge plusieurs mécanismes de transport: + +1. ** STdio Transport ** + * Utilise une entrée / sortie standard pour la communication + * Idéal pour les processus locaux +2. ** Transport HTTP streamable ** + * Utilise HTTP avec des événements de serveur en option pour le streaming + * HTTP Post pour les messages du client à serveur + +### Stdio + +STdio Transport permet la communication via des flux d'entrée et de sortie standard. Ceci est particulièrement utile pour les intégrations locales et les outils de ligne de commande. + +Utilisez-le uniquement lors de l'utilisation de Flowise localement, pas lorsqu'il est déployé sur les services cloud. C'est parce que l'exécution de la commande comme`npx`Installera le package MCP Server (Ex:`@modelcontextprotocol/server-sequential-thinking`) localement, et cela prend souvent du temps pour cela. + +Il est plus adapté à l'application de bureau comme Claude Desktop, vs code, etc. + +#### ** Commande NPX ** + +```json +{ + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-sequential-thinking" + ] +} +``` + +
+ +Pour Windows, reportez-vous[guide](https://gist.github.com/feveromo/7a340d7795fca1ccd535a5802b976e1f). + +#### ** Commande Docker ** + +La commande docker convient lorsque la machine en cours d'exécution a également accès à Docker. Cependant, il ne convient pas aux déploiements sur les services cloud où l'accès Docker est restreint ou indisponible. + +```json +{ + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "mcp/sequentialthinking" + ] +} +``` + +
+ +Docker fournit une liste de serveurs MCP, qui peuvent être trouvés[here](https://hub.docker.com/catalogs/mcp). Voici comment cela fonctionne: + +1. Assurez-vous que Docker fonctionne. +2. Localisez la configuration du serveur MCP et ajoutez-le à ** MCP personnalisé **. Par exemple:[https://hub.docker.com/r/mcp/sequentialthinking](https://hub.docker.com/r/mcp/sequentialthinking) +3. Actualisez les ** actions disponibles **. Si l'image n'est pas trouvée localement, Docker tirera automatiquement la dernière image. Une fois l'image tirée, vous verrez la liste des actions disponibles. + +``` +Unable to find image 'mcp/sequentialthinking:latest' locally +latest: Pulling from mcp/sequentialthinking +f18232174bc9: Already exists +cb2bde55f71f: Pull complete +9d0e0719fbe0: Pull complete +6f063dbd7a5d: Pull complete +93a0fbe48c24: Pull complete +e2e59f8d7891: Pull complete +96ec0bda7033: Pull complete +4f4fb700ef54: Pull complete +d0900e07408c: Pull complete +Digest: sha256:cd3174b2ecf37738654cf7671fb1b719a225c40a78274817da00c4241f465e5f +Status: Downloaded newer image for mcp/sequentialthinking:latest +Sequential Thinking MCP Server running on stdio +``` + +#### Quand utiliser + +* Construire des outils de ligne de commande +* Implémentation de intégrations locales +* Besoin d'une communication de processus simple +* Travailler avec des scripts shell + +### HTTP diffusable (recommandé) + +Nous utiliserons GitHub Remote MCP comme exemple. La belle partie de[Remote GitHub MCP server](https://github.com/github/github-mcp-server), vous n'avez pas besoin de l'installer ou de l'exécuter localement, les nouvelles mises à jour sont appliquées automatiquement. + +#### Étape 1: Créez une variable pour GitHub Pat + +Afin d'accéder au serveur MCP, nous devons créer un jeton d'accès personnel à partir de GitHub. 
Référez-vous à ce [guide](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic). Une fois le PAT créé, créez une variable pour stocker le jeton. Cette variable sera utilisée dans le MCP personnalisé.
+ +#### Étape 2: Créez un MCP personnalisé + +Créez un nœud d'agent et ajoutez un nouvel outil MCP personnalisé. Pour HTTP diffusable, nous avons juste besoin de mettre l'URL et d'autres en-têtes nécessaires. Vous pouvez utiliser[variables](../using-flowise/variables.md)Dans la configuration du serveur MCP avec des accolades à double boucle`{{ }}`et préfixe`$vars.`. + +```json +{ + "url": "https://api.githubcopilot.com/mcp/", + "headers": { + "Authorization": "Bearer {{$vars.githubPAT}}", + } +} +``` + +
+ +#### Étape 3: Sélectionnez les actions + +Si la configuration du serveur MCP fonctionne correctement, vous pouvez actualiser les actions ** disponibles ** et Flowise tirera automatiquement toutes les actions disponibles à partir du serveur MCP. + +
+ +#### Exemples d'interactions: + +> Donnez-moi le problème le plus récent + +
+ +L'agent est en mesure d'identifier les actions appropriées de MCP et de les utiliser pour répondre à la requête de l'utilisateur. + +#### Quand utiliser + +Utilisez HTTP Streamable lorsque: + +* Construire des intégrations Web +* Besoin de communication client-serveur sur HTTP +* Nécessiter des séances avec état +* Soutenir plusieurs clients simultanés +* Implémentation de connexions à reprise + +## Tutoriel vidéo + +{% embed url = "https://youtu.be/7fcli-qm3tk?si=zbneshd3nlcroBro"%} diff --git a/fr/use-cases/README.md b/fr/use-cases/README.md new file mode 100644 index 00000000..101a4b7c --- /dev/null +++ b/fr/use-cases/README.md @@ -0,0 +1,21 @@ +--- +description: Learn to build your own Flowise solutions through practical examples +--- + +# Cas d'utilisation + +*** + +Cette section fournit une collection d'exemples pratiques pour démontrer comment Flowise peut être utilisé pour construire une variété de solutions. + +Chaque cas d'utilisation vous guidera à travers le processus de conception, de construction et de déploiement d'applications du monde réel en utilisant Flowise. + +## Guides + +* [Calling Children Flows](calling-children-flows.md) +* [Calling Webhook](webhook-tool.md) +* [Interacting with API](interacting-with-api.md) +* [Multiple Documents QnA](multiple-documents-qna.md) +* [SQL QnA](sql-qna.md) +* [Upserting Data](upserting-data.md) +* [Web Scrape QnA](web-scrape-qna.md) diff --git a/fr/use-cases/calling-children-flows.md b/fr/use-cases/calling-children-flows.md new file mode 100644 index 00000000..9949de53 --- /dev/null +++ b/fr/use-cases/calling-children-flows.md @@ -0,0 +1,91 @@ +--- +description: Learn how to effectively use the Chatflow Tool and the Custom Tool +--- + +# Appeler les enfants coule + +*** + +L'une des caractéristiques puissantes de Flowise est que vous pouvez transformer les flux en outils. Par exemple, avoir un flux principal pour orchestrer qui / quand utiliser les outils nécessaires. 
Et chaque outil est conçu pour accomplir une niche / une tâche spécifique.

Cela offre quelques avantages :

* Chaque flux enfant utilisé comme outil s'exécutera seul, avec une mémoire séparée, pour permettre une sortie plus propre
* L'agrégation des sorties détaillées de chaque flux enfant vers un agent final se traduit souvent par une sortie de meilleure qualité

Vous pouvez y parvenir en utilisant les outils suivants :

* Outil Chatflow
* Outil personnalisé

## Outil Chatflow

1. Ayez un chatflow prêt. Dans ce cas, nous créons un chatflow de chaîne de pensée qui peut passer par plusieurs chaînes.
+ +2. Créez un autre ChatFlow avec l'agent d'outils + outil ChatFlow. Sélectionnez le ChatFlow que vous souhaitez appeler dans l'outil. Dans ce cas, c'était une chaîne de pensée Chatflow. Donnez-lui un nom et une description appropriée pour permettre à LLM de savoir quand utiliser cet outil: + +
+ +3. Testez-le! + +
+ +4. Dans la réponse, vous pouvez voir l'entrée et la sortie de l'outil ChatFlow: + +
+ +## Outil personnalisé + +Avec le même exemple que ci-dessus, nous allons créer un outil personnalisé qui appellera le[Prediction API](broken-reference)de la chaîne de pensée Chatflow. + +1. Créer un nouvel outil: + + Nom de l'outil outil Description ideas_flow Utilisez cet outil lorsque vous avez besoin d'atteindre certains objectifs + +Schéma d'entrée: + + Propriété Type Description Obligatoire Entrée string Entrée Question true + +
+ +Fonction JavaScript de l'outil: + +```javascript +const fetch = require('node-fetch'); +const url = 'http://localhost:3000/api/v1/prediction/'; // replace with specific chatflow id + +const body = { + "question": $input +}; + +const options = { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify(body) +}; + +try { + const response = await fetch(url, options); + const resp = await response.json(); + return resp.text; +} catch (error) { + console.error(error); + return ''; +} +``` + +2. Créez un outil d'agent d'outils + personnalisé. Spécifiez l'outil que nous avons créé à l'étape 1 dans l'outil personnalisé. + +
+ +3. À partir de la réponse, vous pouvez voir l'entrée et la sortie de l'outil personnalisé: + +
+ +## Conclusion + +Dans cet exemple, nous avons réussi à démontrer 2 façons de transformer d'autres chatflows en outils, via l'outil ChatFlow et l'outil personnalisé. Les deux utilisent la même logique de code sous le capot. diff --git a/fr/use-cases/interacting-with-api.md b/fr/use-cases/interacting-with-api.md new file mode 100644 index 00000000..722c5283 --- /dev/null +++ b/fr/use-cases/interacting-with-api.md @@ -0,0 +1,191 @@ +--- +description: Learn how to use external API integrations with Flowise +--- + +# Interagir avec l'API + +*** + +La spécification OpenAPI (OAS) définit une interface standard et agnostique linguistique aux API HTTP. L'objectif de ce cas d'utilisation est que le LLM détermine automatiquement quelle API appelle, tout en ayant une conversation avec état avec l'utilisateur. + +## Chaîne OpenAPI + +1. Dans ce tutoriel, nous allons utiliser[Klarna OpenAPI](https://gist.github.com/HenryHengZJ/b60f416c42cb9bcd3160fe797421119a) + +{% code overflow = "wrap"%} +```json +{ + "openapi": "3.0.1", + "info": { + "version": "v0", + "title": "Open AI Klarna product Api" + }, + "servers": [ + { + "url": "https://www.klarna.com/us/shopping" + } + ], + "tags": [ + { + "name": "open-ai-product-endpoint", + "description": "Open AI Product Endpoint. Query for products." + } + ], + "paths": { + "/public/openai/v0/products": { + "get": { + "tags": [ + "open-ai-product-endpoint" + ], + "summary": "API for fetching Klarna product information", + "operationId": "productsUsingGET", + "parameters": [ + { + "name": "countryCode", + "in": "query", + "description": "ISO 3166 country code with 2 characters based on the user location. Currently, only US, GB, DE, SE and DK are supported.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "q", + "in": "query", + "description": "A precise query that matches one very small category or product that needs to be searched for to find the products the user is looking for. 
If the user explicitly stated what they want, use that as a query. The query is as specific as possible to the product name or category mentioned by the user in its singular form, and don't contain any clarifiers like latest, newest, cheapest, budget, premium, expensive or similar. The query is always taken from the latest topic, if there is a new topic a new query is started. If the user speaks another language than English, translate their request into English (example: translate fia med knuff to ludo board game)!", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "size", + "in": "query", + "description": "number of products returned", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "min_price", + "in": "query", + "description": "(Optional) Minimum price in local currency for the product searched for. Either explicitly stated by the user or implicitly inferred from a combination of the user's request and the kind of product searched for.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "max_price", + "in": "query", + "description": "(Optional) Maximum price in local currency for the product searched for. 
Either explicitly stated by the user or implicitly inferred from a combination of the user's request and the kind of product searched for.", + "required": false, + "schema": { + "type": "integer" + } + } + ], + "responses": { + "200": { + "description": "Products found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProductResponse" + } + } + } + }, + "503": { + "description": "one or more services are unavailable" + } + }, + "deprecated": false + } + } + }, + "components": { + "schemas": { + "Product": { + "type": "object", + "properties": { + "attributes": { + "type": "array", + "items": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + "price": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "title": "Product" + }, + "ProductResponse": { + "type": "object", + "properties": { + "products": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Product" + } + } + }, + "title": "ProductResponse" + } + } + } +} +``` +{% Endcode%} + +2. Vous pouvez utiliser un[JSON to YAML converter](https://jsonformatter.org/json-to-yaml)et enregistrer comme un`.yaml`Fixez et téléchargez-le sur ** OpenAPI Chain **, puis testez en posant des questions. ** OpenAPI Chain ** Enverra toutes les spécifications à LLM et que le LLM utilise automatiquement la méthode et les paramètres corrects pour l'appel API. + +
+ +3. Cependant, si vous voulez avoir un chat de conversation normal, il ne peut pas le faire. Vous verrez l'erreur suivante. En effet, la chaîne OpenAPI a l'invite suivante: + +``` +Use the provided API's to respond to this user query +``` + +Puisque nous avons "forcé" qu'il trouve toujours l'API pour répondre à la requête utilisateur, dans les cas d'une conversation normale qui n'est pas pertinente pour l'OpenAPI, il ne le fait pas. + +
+ +L'utilisation de cette méthode peut ne pas fonctionner bien si vous avez de grandes spécifications OpenAPI. En effet, nous incluons toutes les spécifications dans le cadre du message envoyé à LLM. Nous comptons ensuite sur LLM pour déterminer l'URL, les paramètres de requête corrects, le corps de demande et les autres paramètres nécessaires nécessaires pour répondre à la requête utilisateur. Comme vous pouvez l'imaginer, si vos spécifications OpenAPI sont compliquées, il y a un risque plus élevé que LLM hallucine. + +## Agent d'outils + boîte à outils OpenAPI + +Afin de résoudre l'erreur ci-dessus, nous pouvons utiliser l'agent. Du livre de cuisine officiel d'Openai:[Function calling with an OpenAPI specification](https://cookbook.openai.com/examples/function_calling_with_an_openapi_spec), il est recommandé de convertir chaque API en un outil lui-même, au lieu de nourrir toutes les API en LLM en tant que message unique. Un agent est également capable d'avoir une interaction humaine, avec la possibilité de décider quel outil utiliser en fonction de la requête de l'utilisateur. + +OpenAPI Toolkit convertira chacun des API du fichier YAML en un ensemble d'outils. De cette façon, les utilisateurs n'ont pas à créer un[Custom Tool](../integrations/langchain/tools/custom-tool.md)pour chaque API. + +1. Connectez ** Toolagent ** avec ** OpenAPI Toolkit **. Ici, nous téléchargeons la spécification YAML pour l'API OpenAI. Le fichier de spécifications peut être trouvé en bas de la page. + +
+ +2. Essayons-le! + +
+ +Comme vous pouvez le remarquer à partir du chat, l'agent est capable de mener une conversation normale et d'utiliser un outil approprié pour répondre à la requête utilisateur. Si vous utilisez l'outil analytique, vous pouvez voir la liste des outils que nous avons convertis à partir du fichier YAML: + +
+ +## Ascension + +1. Trouvez l'exemple de flux appelé - ** Chaîne de QA de récupération conversationnelle ** à partir des modèles de marché. +2. Nous allons utiliser[PDF File Loader](../integrations/langchain/document-loaders/pdf-file.md)et télécharger les fichiers respectifs: + +
+ +3. Cliquez sur ** Paramètres supplémentaires ** du chargeur de fichiers PDF et spécifiez l'objet de métadonnées. Par exemple, le fichier PDF avec Apple Form-10K téléchargé peut avoir un objet de métadonnées`{source: apple}`, tandis que le fichier PDF avec Tesla Form-10K téléchargé peut avoir`{source: tesla}`. Ceci est fait pour Seggregate les documents pendant l'heure de récupération. + +
+ +4. Après avoir rempli les informations d'identification pour Pinecone, cliquez sur Upsert: + +
+ +
+ +5. Sur[Pinecone console](https://app.pinecone.io)Vous pourrez voir les nouveaux vecteurs qui ont été ajoutés. + +
+ +## Requête + +1. Une fois que les données ont été renvoyées sur Pinecone, nous pouvons maintenant commencer à poser une question dans le chat! + +
+ +2. Cependant, le contexte récupéré utilisé pour retourner la réponse est un mélange de documents Apple et Tesla. Comme vous pouvez le voir sur les documents source: + +
+ +3. Nous pouvons résoudre ce problème en spécifiant un filtre de métadonnées à partir du nœud de pinone. Par exemple, si nous voulons seulement récupérer le contexte d'Apple Form-10k, nous pouvons regarder en arrière les métadonnées que nous avons spécifiées plus tôt dans le[#upsert](multiple-documents-qna.md#upsert "mention")étape, puis utilisez la même chose dans le filtre des métadonnées ci-dessous: + +
+ +4. Posons à nouveau la même question, nous devrions maintenant voir tout le contexte récupéré provient en effet d'Apple Form-10k: + +
+ +{% hint style = "info"%} +Chaque fournisseur de données vectorielle a un format différent de syntaxe de filtrage, recommande de lire la documentation de la base de données vectorielle respective +{% EndHint%} + +5. Cependant, le problème avec cela est que le filtrage des métadonnées est une sorte de _ ** "codé dur" ** _. Idéalement, nous devons laisser le LLM décider quel document récupérer en fonction de la question. + +## Agent d'outils + +Nous pouvons résoudre le problème du filtre de métadonnées _ ** "codé dur" ** _ en utilisant[Tool Agent](../integrations/langchain/agents/tool-agent.md). + +En fournissant des outils à l'agent, nous pouvons laisser l'agent à décider quel outil est approprié à utiliser en fonction de la question. + +1. Créer un[Retriever Tool](../integrations/langchain/tools/retriever-tool.md)avec le nom et la description suivants: + + name Description search_apple Utilisez cette fonction pour répondre aux questions des utilisateurs sur Apple Inc (Appl). Il contient un dossier de formulaire SEC 10K décrivant les finances d'Apple Inc (Appl) pour la période de 2022. + +2. Connectez-vous au nœud de pignon avec un filtre de métadonnées`{source: apple}` + +
+ +3. Répétez la même chose pour Tesla: + + name Description PineCone Metadata Filter Search_tsla Utiliser cette fonction pour répondre aux questions d'utilisation sur Tesla Inc (Tsla). Il contient un dossier SEC 10K décrivant les finances de Tesla Inc (TSLA) pour la période de 2022. {source: Tesla} + +{% hint style = "info"%} +Il est important de spécifier une description claire et concise. Cela permet à LLM de mieux décider quand utiliser quel outil +{% EndHint%} + +Votre flux devrait ressembler ci-dessous: + +
+ +4. Maintenant, nous devons créer une instruction générale à l'agent d'outils. Cliquez sur ** Paramètres supplémentaires ** du nœud et spécifiez le ** Message système **. Par exemple: + +``` +You are an expert financial analyst that always answers questions with the most relevant information using the tools at your disposal. +These tools have information regarding companies that the user has expressed interest in. +Here are some guidelines that you must follow: +* For financial questions, you must use the tools to find the answer and then write a response. +* Even if it seems like your tools won't be able to answer the question, you must still use them to find the most relevant information and insights. Not using them will appear as if you are not doing your job. +* You may assume that the users financial questions are related to the documents they've selected. +* For any user message that isn't related to financial analysis, respectfully decline to respond and suggest that the user ask a relevant question. +* If your tools are unable to find an answer, you should say that you haven't found an answer but still relay any useful information the tools found. +* Dont ask clarifying questions, just return answer. + +The tools at your disposal have access to the following SEC documents that the user has selected to discuss with you: +- Apple Inc (APPL) FORM 10K 2022 +- Tesla Inc (TSLA) FORM 10K 2022 + +The current date is: 2024-01-28 +``` + +5. Enregistrez le chat et commencez à poser une question! + +
+ +
+ +6. Suivi avec Tesla: + +
+ +7. Nous sommes maintenant en mesure de poser des questions sur tous les documents que nous avons précédemment renversés dans la base de données vectorielle sans "codage dur" le filtrage des métadonnées en utilisant des outils + agent. + +## Retriever des métadonnées + +Avec l'approche de l'agent d'outils, l'utilisateur doit créer plusieurs outils Retriever pour récupérer des documents à partir de différentes sources. Cela pourrait être un problème s'il existe un grand nombre de sources de documents avec différentes métadonnées. En utilisant l'exemple ci-dessus avec uniquement Apple et Tesla, nous pourrions potentiellement s'étendre à d'autres sociétés telles que Disney, Amazon, etc. Il serait une tâche fastidieuse de créer un outil de retrever pour chaque entreprise. + +Metadata Retriever entre en jeu. L'idée est de demander à LLM d'extraire les métadonnées de la question de l'utilisateur, puis de l'utiliser comme filtre lors de la recherche dans les bases de données vectorielles. + +Par exemple, si un utilisateur pose des questions liées à Apple, un filtre de métadonnées`{source: apple}`sera automatiquement appliqué sur la recherche de base de données vectorielle. + +
 width =
+ +Dans ce scénario, nous pouvons avoir un seul outil Retriever et placer le ** Metadata Retriever ** entre la base de données vectorielle et l'outil Retriever. + +
+ +## Agent XML + +Pour certaines LLM, les capacités des appels de fonction ne sont pas prises en charge. Dans ce cas, nous pouvons utiliser l'agent XML pour inviter le LLM dans un format / syntaxe plus structuré, dans le but d'utiliser les outils fournis. + +Il a l'invite sous-jacente: + +```xml +You are a helpful assistant. Help the user answer any questions. + +You have access to the following tools: + +{tools} + +In order to use a tool, you can use and tags. You will then get back a response in the form +For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond: + +searchweather in SF +64 degrees + +When you are done, respond with a final answer between . For example: + +The weather in SF is 64 degrees + +Begin! + +Previous Conversation: +{chat_history} + +Question: {input} +{agent_scratchpad} +``` + +
+ +## Conclusion + +Nous avons couvert l'utilisation de la chaîne QA de récupération conversationnelle et sa limitation lors de l'interrogation de plusieurs documents. Et nous avons pu surmonter le problème en utilisant l'agent de fonction OpenAI / Agent XML + outils. Vous pouvez trouver les modèles ci-dessous: + +{% fichier src = "../. gitbook / actifs / toolagent chatflow.json"%} + +{% fichier src = "../. GitBook / Assets / xmlagent chatflow.json"%} diff --git a/fr/use-cases/sql-qna.md b/fr/use-cases/sql-qna.md new file mode 100644 index 00000000..2779511e --- /dev/null +++ b/fr/use-cases/sql-qna.md @@ -0,0 +1,335 @@ +--- +description: Learn how to query structured data +--- + +# SQL QNA + +*** + +Contrairement aux exemples précédents comme[Web Scrape QnA](web-scrape-qna.md)et[Multiple Documents QnA](multiple-documents-qna.md), interroger les données structurées ne nécessite pas de base de données vectorielle. Au niveau élevé, cela peut être réalisé avec les étapes suivantes: + +1. Fournir le LLM: + * Aperçu du schéma de base de données SQL + * Exemples de données de lignes +2. Renvoyez une requête SQL avec peu d'incitation +3. Valider la requête SQL à l'aide d'un[If Else](../integrations/utilities/if-else.md)nœud +4. Créez une fonction personnalisée pour exécuter la requête SQL et obtenez la réponse +5. Renvoyer une réponse naturelle de la réponse SQL exécutée + +
+ +Dans cet exemple, nous allons créer un chatbot QNA qui peut interagir avec une base de données SQL stockée à Singlestore + +
+

## TL;DR

Vous pouvez trouver le modèle de chatflow :

{% file src="../.gitbook/assets/SQL Chatflow.json" %}

## 1. Schéma de base de données SQL + exemples de lignes

Utilisez un nœud de fonction JS personnalisé pour vous connecter à Singlestore, récupérer le schéma de base de données et les 3 premières lignes.

D'après cet [article de recherche](https://arxiv.org/abs/2204.00498), il est recommandé de générer une invite avec le format d'exemple suivant :

```
CREATE TABLE samples (firstName varchar NOT NULL, lastName varchar)
SELECT * FROM samples LIMIT 3
firstName lastName
Stephen Tyler
Jack McGinnis
Steven Repici
```

+ + + + Code JavaScript complet + +```javascript +const HOST = 'singlestore-host.com'; +const USER = 'admin'; +const PASSWORD = 'mypassword'; +const DATABASE = 'mydb'; +const TABLE = 'samples'; +const mysql = require('mysql2/promise'); + +let sqlSchemaPrompt; + +function getSQLPrompt() { + return new Promise(async (resolve, reject) => { + try { + const singleStoreConnection = mysql.createPool({ + host: HOST, + user: USER, + password: PASSWORD, + database: DATABASE, + }); + + // Get schema info + const [schemaInfo] = await singleStoreConnection.execute( + `SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = "${TABLE}"` + ); + + const createColumns = []; + const columnNames = []; + + for (const schemaData of schemaInfo) { + columnNames.push(`${schemaData['COLUMN_NAME']}`); + createColumns.push(`${schemaData['COLUMN_NAME']} ${schemaData['COLUMN_TYPE']} ${schemaData['IS_NULLABLE'] === 'NO' ? 'NOT NULL' : ''}`); + } + + const sqlCreateTableQuery = `CREATE TABLE samples (${createColumns.join(', ')})`; + const sqlSelectTableQuery = `SELECT * FROM samples LIMIT 3`; + + // Get first 3 rows + const [rows] = await singleStoreConnection.execute( + sqlSelectTableQuery, + ); + + const allValues = []; + for (const row of rows) { + const rowValues = []; + for (const colName in row) { + rowValues.push(row[colName]); + } + allValues.push(rowValues.join(' ')); + } + + sqlSchemaPrompt = sqlCreateTableQuery + '\n' + sqlSelectTableQuery + '\n' + columnNames.join(' ') + '\n' + allValues.join('\n'); + + resolve(); + } catch (e) { + console.error(e); + return reject(e); + } + }); +} + +async function main() { + await getSQLPrompt(); +} + +await main(); + +return sqlSchemaPrompt; +``` + + + +Vous pouvez en savoir plus sur la façon d'obtenir le`HOST`, `USER`, `PASSWORD`de cette[guide](broken-reference/). Une fois terminé, cliquez sur Exécuter: + +
+ +Nous pouvons maintenant voir que le bon format a été généré. La prochaine étape consiste à mettre cela dans le modèle invite. + +## 2. Renvoyez une requête SQL avec peu d'incitation + +Créer un nouveau modèle de chat + modèle d'invite + llmchain + +
+ +Spécifiez l'invite suivante dans le modèle d'invite: + +``` +Based on the provided SQL table schema and question below, return a SQL SELECT ALL query that would answer the user's question. For example: SELECT * FROM table WHERE id = '1'. +------------ +SCHEMA: {schema} +------------ +QUESTION: {question} +------------ +SQL QUERY: +``` + +Puisque nous utilisons 2 variables: {schéma} et {question}, spécifiez leurs valeurs dans les valeurs d'invite de format ** **: + +
+

{% hint style="info" %}
Vous pouvez fournir plus d'exemples dans l'invite (c'est-à-dire du few-shot prompting) pour aider le LLM à mieux apprendre. Vous pouvez aussi vous référer au [dialect-specific prompting](https://js.langchain.com/docs/use\_cases/sql/prompting#dialect-specific-prompting)
{% endhint %}

## 3. Valider la requête SQL à l'aide d'un nœud [If Else](../integrations/utilities/if-else.md)

Parfois, la requête SQL n'est pas valide, et nous ne voulons pas gaspiller de ressources à exécuter une requête SQL non valide. Par exemple, si un utilisateur pose une question générale qui n'est pas pertinente pour la base de données SQL. Nous pouvons utiliser un nœud `If Else` pour router vers un chemin différent.

Par exemple, nous pouvons effectuer une vérification de base pour voir si SELECT et WHERE sont inclus dans la requête SQL donnée par le LLM.

{% tabs %}
{% tab title="If Function" %}
```javascript
const sqlQuery = $sqlQuery.trim();

const regex = /SELECT\s.*?(?:\n|$)/gi;

// Extracting the SQL part
const matches = sqlQuery.match(regex);
const cleanSql = matches ? matches[0].trim() : "";

if (cleanSql.includes("SELECT") && cleanSql.includes("WHERE")) {
    return cleanSql;
}
```
{% endtab %}

{% tab title="Else Function" %}
```javascript
return $sqlQuery;
```
{% endtab %}
{% endtabs %}

+ +Dans la fonction ELSE, nous irons vers un modèle rapide + llmchain qui indique essentiellement à LLM qu'il n'est pas en mesure de répondre à la requête utilisateur: + +
+ +## 4. Fonction personnalisée pour exécuter la requête SQL et obtenir la réponse + +S'il s'agit d'une requête SQL valide, nous devons exécuter la requête. Connectez la sortie _ ** true ** _ de ** nœud if else ** à une fonction ** js personnalisée ** nœud: + +
+ + + + Code JavaScript complet + +```javascript +const HOST = 'singlestore-host.com'; +const USER = 'admin'; +const PASSWORD = 'mypassword'; +const DATABASE = 'mydb'; +const TABLE = 'samples'; +const mysql = require('mysql2/promise'); + +let result; + +function getSQLResult() { + return new Promise(async (resolve, reject) => { + try { + const singleStoreConnection = mysql.createPool({ + host: HOST, + user: USER, + password: PASSWORD, + database: DATABASE, + }); + + const [rows] = await singleStoreConnection.execute( + $sqlQuery + ); + + result = JSON.stringify(rows) + + resolve(); + } catch (e) { + console.error(e); + return reject(e); + } + }); +} + +async function main() { + await getSQLResult(); +} + +await main(); + +return result; +``` + + + +## 5. Renvoie une réponse naturelle de la réponse SQL exécutée + +Créer un nouveau modèle de chat + modèle d'invite + llmchain + +
+ +Écrivez l'invite suivante dans le modèle d'invite: + +``` +Based on the question, and SQL response, write a natural language response, be details as possible: +------------ +QUESTION: {question} +------------ +SQL RESPONSE: {sqlResponse} +------------ +NATURAL LANGUAGE RESPONSE: +``` + +Spécifiez les variables dans les valeurs d'invite de format ** **: + +
+ +Le tour est joué! Votre chatbot SQL est maintenant prêt pour les tests! + +## Requête + +Tout d'abord, demandons quelque chose lié à la base de données. + +
+ +En regardant les journaux, nous pouvons voir que le premier LLMCHAIN ​​est capable de nous donner une requête SQL: + +**Saisir:** + +{% code overflow = "wrap"%} +``` +Based on the provided SQL table schema and question below, return a SQL SELECT ALL query that would answer the user's question. For example: SELECT * FROM table WHERE id = '1'.\n------------\nSCHEMA: CREATE TABLE samples (id bigint(20) NOT NULL, firstName varchar(300) NOT NULL, lastName varchar(300) NOT NULL, userAddress varchar(300) NOT NULL, userState varchar(300) NOT NULL, userCode varchar(300) NOT NULL, userPostal varchar(300) NOT NULL, createdate timestamp(6) NOT NULL)\nSELECT * FROM samples LIMIT 3\nid firstName lastName userAddress userState userCode userPostal createdate\n1125899906842627 Steven Repici 14 Kingston St. Oregon NJ 5578 Thu Dec 14 2023 13:06:17 GMT+0800 (Singapore Standard Time)\n1125899906842625 John Doe 120 jefferson st. Riverside NJ 8075 Thu Dec 14 2023 13:04:32 GMT+0800 (Singapore Standard Time)\n1125899906842629 Bert Jet 9th, at Terrace plc Desert City CO 8576 Thu Dec 14 2023 13:07:11 GMT+0800 (Singapore Standard Time)\n------------\nQUESTION: what is the address of John\n------------\nSQL QUERY: +``` +{% Endcode%} + +**Sortir** + +
   SELECT userAddress FROM samples WHERE firstName = 'John'
+  
+

Après avoir exécuté la requête SQL, le résultat est transmis au deuxième LLMChain :

**Entrée**

{% code overflow="wrap" %}
```
Based on the question, and SQL response, write a natural language response, be details as possible:\n------------\nQUESTION: what is the address of John\n------------\nSQL RESPONSE: [{\"userAddress\":\"120 jefferson st.\"}]\n------------\nNATURAL LANGUAGE RESPONSE:
```
{% endcode %}

**Sortie**

```
The address of John is 120 Jefferson St.
```

Maintenant, si nous demandons quelque chose qui n'est pas pertinent pour la base de données SQL, l'itinéraire ELSE est emprunté.

+ +Pour le premier llmchain, une requête SQL est générée comme ci-dessous: + +```sql +SELECT * FROM samples LIMIT 3 +``` + +Cependant, il échoue`If Else`Vérifiez car il ne contient pas les deux`SELECT`et`WHERE`, donc entrant sur l'itinéraire des autres qui a une invite qui dit: + +``` +Politely say "I'm not able to answer query" +``` + +Et la sortie finale est: + +``` +I apologize, but I'm not able to answer your query at the moment. +``` + +## Conclusion + +Dans cet exemple, nous avons créé avec succès un chatbot SQL qui peut interagir avec votre base de données et est également en mesure de gérer des questions qui ne sont pas pertinentes pour la base de données. Une amélioration supplémentaire comprend l'ajout de mémoire pour fournir l'historique des conversations. + +Vous pouvez trouver le Chatflow ci-dessous: + +{% fichier src = "../. gitbook / actifs / sql chatflow (1) .json"%} diff --git a/fr/use-cases/upserting-data.md b/fr/use-cases/upserting-data.md new file mode 100644 index 00000000..334fdc5b --- /dev/null +++ b/fr/use-cases/upserting-data.md @@ -0,0 +1,159 @@ +--- +description: Learn how to upsert data to Vector Stores with Flowise +--- + +# Augmenter les données + +*** + +Il existe deux façons fondamentales de renforcer vos données dans un[Vector Store](../integrations/langchain/vector-stores/)en utilisant Flowise, soit via[API calls](broken-reference)ou en utilisant un ensemble de nœuds dédiés, nous avons prêt à cet effet. + +Dans ce guide, même s'il est ** hautement recommandé ** que vous préparez vos données en utilisant le[Document Stores](../using-flowise/document-stores.md)Avant de renverser un magasin vectoriel, nous passerons par l'ensemble du processus en utilisant les nœuds spécifiques nécessaires à cette fin, en décrivant les étapes, les avantages de cette approche et les stratégies d'optimisation pour une gestion efficace des données. 
 

## Comprendre le processus d'upsert

La première chose que nous devons comprendre est que le processus d'upsert de données vers un [Vector Store](../integrations/langchain/vector-stores/) est une pièce fondamentale de la construction d'un système de [Retrieval Augmented Generation (RAG)](multiple-documents-qna.md). Cependant, une fois ce processus terminé, le RAG peut être exécuté indépendamment.

En d'autres termes, dans Flowise, vous pouvez upserter des données sans configuration RAG complète, et vous pouvez exécuter votre RAG sans les nœuds spécifiques utilisés dans le processus d'upsert, ce qui signifie que bien qu'un magasin vectoriel bien peuplé soit crucial pour le fonctionnement du RAG, les processus de récupération et de génération proprement dits ne nécessitent pas d'upsert continu.


Upsert vs Rag

+ +## Installation + +Supposons que nous ayons un long jeu de données au format PDF que nous avons besoin pour user à notre[Upstash Vector Store](../integrations/langchain/vector-stores/upstash-vector.md)Nous pourrions donc demander à un LLM de récupérer des informations spécifiques de ce document. + +Pour ce faire, et pour illustrer ce tutoriel, nous aurions besoin de créer un flux ** à la mise à la hausse ** avec 5 nœuds différents: + +

Flow upsert

+ +## 1. Chargeur de documents + +La première étape consiste à ** télécharger nos données PDF dans l'instance Flowise ** en utilisant un[Document Loader node](../integrations/langchain/document-loaders/). Les chargeurs de documents sont des nœuds spécialisés qui gèrent l'ingestion de divers formats de documents, y compris ** pdfs **, ** txt **, ** csv **, pages de notion, et plus encore. + +Il est important de mentionner que chaque chargeur de documents est livré avec deux paramètres importants ** supplémentaires ** qui nous permettent d'ajouter et d'omettre les métadonnées de notre ensemble de données à volonté. + +

Paramètres supplémentaires

+ +{% hint style = "info"%} +** TIP **: Les paramètres de métadonnées ADD / OMIT, bien qu'ils soient facultatifs, sont très utiles pour cibler notre ensemble de données une fois qu'il est déposé dans un magasin vectoriel ou pour enlever les métadonnées inutiles. +{% EndHint%} + +## 2. Splitter de texte + +Une fois que nous avons téléchargé notre PDF ou notre ensemble de données, nous devons ** le diviser en pièces, documents ou morceaux plus petits **. Il s'agit d'une étape de prétraitement cruciale pour 2 raisons principales: + +* ** Vitesse et pertinence de récupération: ** Le stockage et l'interrogation de gros documents en tant qu'entités uniques dans une base de données vectorielle peuvent conduire à des temps de récupération plus lents et à des résultats potentiellement moins pertinents. La division du document en morceaux plus petits permet une récupération plus ciblée. En interrogeant contre des unités d'information plus petites et plus ciblées, nous pouvons atteindre des temps de réponse plus rapides et améliorer la précision des résultats récupérés. +* ** RETENDANT: ** Puisque nous ne récupérons que des morceaux pertinents plutôt que le document entier, le nombre de jetons traités par le LLM est considérablement réduit. Cette approche de récupération ciblée se traduit directement par une baisse des coûts d'utilisation de notre LLM, car la facturation est généralement basée sur la consommation de jetons. En minimisant la quantité d'informations non pertinentes envoyées à la LLM, nous optimisons également pour le coût. + +### Nœuds + +En flux, ce processus de division est accompli en utilisant le[Text Splitter nodes](../integrations/langchain/text-splitters/). Ces nœuds fournissent une gamme de stratégies de segmentation de texte, notamment: + +* ** Clissage du texte des caractères: ** Divide le texte en morceaux d'un nombre fixe de caractères. 
Cette méthode est simple mais peut diviser des mots ou des phrases sur des morceaux, perturbant potentiellement le contexte. +* ** Diffusion du texte de jeton: ** Segmentation du texte en fonction des limites des mots ou des schémas de tokenisation spécifiques au modèle d'intégration choisi. Cette approche conduit souvent à des morceaux plus cohérents sémantiquement, car il préserve les limites des mots et considère la structure linguistique sous-jacente du texte. +* ** Diffusion du texte récursif du caractère: ** Cette stratégie vise à diviser le texte en morceaux qui maintiennent la cohérence sémantique tout en restant dans une limite de taille spécifiée. Il est particulièrement bien adapté aux documents hiérarchiques avec des sections ou des titres imbriqués. Au lieu de se diviser aveuglément à la limite de caractère, il analyse récursivement le texte pour trouver des points d'arrêt logiques, tels que les fins de phrase ou les ruptures de section. Cette approche garantit que chaque morceau représente une unité d'information significative, même si elle dépasse légèrement la taille cible. +* ** Splitter de texte de marque: ** Conçu spécifiquement pour les documents formulés Markdown, ce séparateur segmente logiquement le texte basé sur des en-têtes de démarque et des éléments structurels, créant des morceaux qui correspondent à des sections logiques dans le document. +* ** Splitter de texte de code: ** Adapté pour la division des fichiers de code, cette stratégie considère la structure du code, les définitions de fonction et d'autres éléments spécifiques au langage de programmation pour créer des morceaux significatifs qui conviennent aux tâches telles que la recherche et la documentation de code. +* ** Splitter de texte HTML à markdown: ** Ce séparateur spécialisé convertit d'abord le contenu HTML à Markdown, puis applique le séparateur de texte Markdown, permettant une segmentation structurée des pages Web et d'autres documents HTML. 
+ +Les nœuds de séparateur de texte fournissent un contrôle granulaire sur la segmentation du texte, permettant la personnalisation de paramètres tels que: + +* ** Taille du morceau: ** La taille maximale souhaitée de chaque morceau, généralement définie en caractères ou en jetons. +* ** chevauchement de morceaux: ** Le nombre de caractères ou de jetons à chevaucher entre des morceaux consécutifs, utile pour maintenir le flux contextuel à travers des morceaux. + +{% hint style = "info"%} +** Astuce: ** Notez que la taille du morceau et les valeurs de chevauchement des morceaux ne sont pas additives. Sélection`chunk_size=1200`et`chunk_overlap=400`Ne se traduit pas par une taille totale de 1600. La valeur de chevauchement détermine le nombre de jetons du morceau précédent inclus dans le morceau actuel pour maintenir le contexte. Il n'augmente pas la taille globale du morceau. +{% EndHint%} + +### Chevauchement de morceaux + +Dans le contexte de la récupération basée sur les vecteurs et de la requête LLM, le chevauchement de morceaux joue un ** rôle important dans le maintien de la continuité contextuelle ** et ** Amélioration de la précision de la réponse **, en particulier lorsqu'il s'agit d'une profondeur de récupération limitée ou ** Top K **, qui est le paramètre qui détermine le nombre maximum de la plupart des morceaux similaires qui sont récupérés à partir de la[Vector Store](../integrations/langchain/vector-stores/)en réponse à une requête. + +Pendant le traitement des requêtes, le LLM exécute une recherche de similitude contre le magasin vectoriel pour récupérer les morceaux les plus pertinents sémantiquement à la requête donnée. Si la profondeur de récupération, représentée par le paramètre K supérieur, est définie sur une petite valeur, 4 pour par défaut, le LLM utilise initialement des informations uniquement à partir de ces 4 morceaux pour générer sa réponse. 
+ +Ce scénario nous présente un problème, car le fait de s'appuyer uniquement sur un nombre limité de morceaux sans chevauchement peut entraîner des réponses incomplètes ou inexactes, en particulier lorsqu'ils traitent des requêtes qui nécessitent des informations couvrant plusieurs morceaux. + +Le chevauchement des morceaux aide à ce problème en s'assurant qu'une partie du contexte textuel est partagée sur des morceaux consécutifs, ** augmentant la probabilité que toutes les informations pertinentes pour une requête donnée soient contenues dans les morceaux récupérés **. + +En d'autres termes, ce chevauchement sert de pont entre des morceaux, permettant au LLM d'accéder à une fenêtre contextuelle plus large même lorsqu'elle est limitée à un petit ensemble de morceaux récupérés (haut K). Si une requête est liée à un concept ou à une information qui s'étend au-delà d'un seul morceau, les régions qui se chevauchent augmentent la probabilité de capturer tout le contexte nécessaire. + +Par conséquent, en introduisant un chevauchement de morceaux pendant la phase de division du texte, nous améliorons la capacité du LLM à: + +1. ** Préserver la continuité contextuelle: ** Les morceaux qui se chevauchent fournissent une transition plus fluide des informations entre les segments consécutifs, permettant au modèle de maintenir une compréhension plus cohérente du texte. +2. ** Améliorer la précision de la récupération: ** En augmentant la probabilité de capturer toutes les informations pertinentes dans le top k cible k récupéré, le chevauchement contribue à des réponses plus précises et plus appropriées. + +### Précision vs coût + +Ainsi, pour optimiser davantage le compromis entre la précision de la récupération et le coût, deux stratégies primaires peuvent être utilisées: + +1. 
** Le chevauchement d'augmentation / diminution du morceau: ** L'ajustement du pourcentage de chevauchement pendant la division de texte permet un contrôle à grain fin sur la quantité de contexte partagé entre les morceaux. Des pourcentages de chevauchement plus élevés entraînent généralement une amélioration de la préservation du contexte, mais peuvent également augmenter les coûts, car vous devez utiliser plus de morceaux pour englober l'ensemble du document. À l'inverse, des pourcentages de chevauchement plus faibles peuvent réduire les coûts, mais risquent de perdre des informations contextuelles clés entre les morceaux, conduisant potentiellement à des réponses moins précises ou incomplètes du LLM. +2. ** Augmentation / diminution du top k: ** L'augmentation de la valeur K supérieure par défaut (4) élargit le nombre de morceaux considérés pour la génération de réponse. Bien que cela puisse améliorer la précision, cela augmente également les coûts. + +{% hint style = "info"%} +** Astuce: ** Le choix des valeurs optimales ** des chevauchement ** et ** Top K ** dépend de facteurs tels que la complexité du document, les caractéristiques du modèle d'intégration et l'équilibre souhaité entre la précision et le coût. L'expérimentation avec ces valeurs est importante pour trouver la configuration idéale pour un besoin spécifique. +{% EndHint%} + +## 3. Incorporer + +Nous avons maintenant téléchargé notre ensemble de données et configuré comment nos données seront divisées avant qu'elle ne se renforce à notre[Vector Store](../integrations/langchain/vector-stores/). À ce point,[the embedding nodes](../integrations/langchain/embeddings/)Entrez en jeu, ** convertir tous ces morceaux en une "langue" qu'un LLM peut facilement comprendre **. + +Dans ce contexte actuel, l'intégration est le processus de conversion du texte en une représentation numérique qui capture sa signification. 
Cette représentation numérique, également appelée vecteur d'intégration, est un tableau de nombres multidimensionnel, où chaque dimension représente un aspect spécifique de la signification du texte. + +Ces vecteurs permettent aux LLM de comparer et de rechercher des morceaux de texte similaires dans le magasin vectoriel en mesurant la distance ou la similitude entre eux dans cet espace multidimensionnel. + +### Comprendre les dimensions des intégres / vecteurs des vecteurs + +Le nombre de dimensions dans un indice de magasin vectoriel est déterminé par le modèle d'incorporation utilisé lorsque nous augmentons nos données, et vice versa. Chaque dimension représente une fonction ou un concept spécifique dans les données. Par exemple, une ** dimension ** pourrait ** représenter un sujet, un sentiment ou un autre aspect particulier du texte **. + +Plus nous utilisons de dimensions pour intégrer nos données, plus le potentiel de capture de sens nuancé de notre texte est grand. Cependant, cette augmentation se fait au prix des exigences de calcul plus élevées par requête. + +En général, un plus grand nombre de dimensions nécessite plus de ressources pour stocker, traiter et comparer les vecteurs d'intégration résultants. Par conséquent, des modèles intégrés comme le Google`embedding-001`, qui utilise 768 dimensions, sont, en théorie, moins chères que d'autres comme l'Openai`text-embedding-3-large`, avec 3072 dimensions. + +Il est important de noter que la ** relation entre les dimensions et la capture de sens n'est pas strictement linéaire **; Il y a un point de rendement décroissant où l'ajout de dimensions offre un avantage négligeable pour le coût inutile supplémentaire. + +{% hint style = "info"%} +** Astuce: ** Pour garantir la compatibilité entre un modèle d'incorporation et un indice de magasin vectoriel, l'alignement dimensionnel est essentiel. Le modèle et l'index doivent utiliser le même nombre de dimensions pour la représentation vectorielle **. 
L'inadéquation de la dimensionnalité entraînera des erreurs de mise en service, car le magasin vectoriel est conçu pour gérer les vecteurs d'une taille spécifique déterminée par le modèle d'incorporation choisi. +{% EndHint%} + +## 4. Magasin vectoriel + +Le[Vector Store node](../integrations/langchain/vector-stores/)est le ** nœud de fin de notre flux de mise en service **. Il agit comme le pont entre notre instance Flowise et notre base de données vectorielle, nous permettant d'envoyer les intérêts générés, ainsi que toutes les métadonnées associées, à notre index du magasin de vecteur cible pour le stockage persistant et la récupération ultérieure. + +C'est dans ce nœud où nous pouvons définir des paramètres comme "** TOP K **", qui, comme nous l'avons dit précédemment, est le paramètre qui détermine le nombre maximum des morceaux similaires qui sont récupérés du magasin vectoriel en réponse à une requête. + +
+ +{% hint style = "info"%} +** CONSEIL: ** Une valeur K supérieure inférieure donnera des résultats moins, mais potentiellement plus pertinents, tandis qu'une valeur plus élevée renverra une gamme plus large de résultats, capturant potentiellement plus d'informations. +{% EndHint%} + +## 5. Record Manager + +Le[Record Manager node](../integrations/langchain/record-managers.md)est un ajout facultatif mais incroyablement utile à notre flux de mise en service. Il nous permet de maintenir des enregistrements de tous les morceaux qui ont été renversés vers notre magasin vectoriel, nous permettant d'ajouter ou de supprimer efficacement des morceaux au besoin. + +Pour un guide plus approfondi, nous vous référons à[this guide](../integrations/langchain/record-managers.md). + +
+ +## 6. Aperçu complet + +Enfin, examinons chaque étape, du chargement initial du document à la représentation du vecteur final, mettant en évidence les composants clés et leurs rôles dans le processus de mise en service. + +
+ +1. ** Document Ingesttion **: + * Nous commençons par nourrir nos données brutes en flux en utilisant le nœud de chargeur de document ** approprié ** pour votre format de données. +2. ** Diffusion stratégique ** + * Ensuite, le nœud ** du séparateur de texte ** divise notre document en morceaux plus petits et plus gérables. Ceci est crucial pour une récupération et un contrôle des coûts efficaces. + * Nous avons une flexibilité dans la façon dont cette division se produit en sélectionnant le nœud de séparateur de texte approprié et, surtout, par une taille de morceau de réglage fin et un chevauchement de morceaux pour équilibrer la préservation du contexte avec l'efficacité. +3. ** des incorporations significatives ** + * Maintenant, juste avant que nos données ne soient enregistrées dans le magasin vectoriel, le ** nœud d'intégration ** intervient. Il transforme chaque morceau de texte et sa signification en une représentation numérique que notre LLM peut comprendre. +4. ** Index du magasin vectoriel ** + * Enfin, le nœud ** Vector Store ** agit comme le pont entre Flowise et notre base de données. Il envoie nos intérêts, ainsi que toutes les métadonnées associées, à l'indice de magasin vectoriel désigné. + * Ici, dans ce nœud, nous pouvons contrôler le comportement de récupération en définissant le paramètre ** supérieur k **, qui influence le nombre de morceaux considérés lors de la réponse à une requête. +5. ** Données prêtes ** + * Une fois renversé, nos données sont désormais représentées comme des vecteurs dans le magasin vectoriel, prêts pour la recherche et la récupération de similitude. +6. ** Resteur (facultatif) ** + * Pour les données de contrôle et de gestion améliorées, le nœud ** Record Manager ** garde la trace de tous les morceaux lancés. Cela facilite les mises à jour ou les éliminations faciles à mesure que vos données ou vos besoins évoluent. 
+ +Essentiellement, le processus de mise en service transforme nos données brutes en un format prêt pour LLM, optimisé pour une récupération rapide et rentable. diff --git a/fr/use-cases/web-scrape-qna.md b/fr/use-cases/web-scrape-qna.md new file mode 100644 index 00000000..32797e6b --- /dev/null +++ b/fr/use-cases/web-scrape-qna.md @@ -0,0 +1,93 @@ +--- +description: Learn how to scrape, upsert, and query a website +--- + +# Web strecle qna + +*** + +Supposons que vous ayez un site Web (pourrait être un magasin, un site de commerce électronique, un blog), et que vous souhaitez supprimer tous les liens relatifs de ce site Web et demander à LLM de répondre à n'importe quelle question sur votre site Web. Dans ce tutoriel, nous allons passer par la façon d'y parvenir. + +Vous pouvez trouver l'exemple de flux appelé - ** Page Web QNA ** à partir des modèles de marché. + +## Installation + +Nous allons utiliser ** Cheerio Web Scraper ** Node pour raconter des liens à partir d'une URL donnée et du séparateur de texte ** htmltomarkdown ** pour diviser le contenu gratté en petits morceaux. + +
+ +Si vous ne spécifiez rien, par défaut, seule la page URL donnée sera grattée. Si vous souhaitez ramper le reste des liens relatifs, cliquez sur ** Paramètres supplémentaires ** du grattoir Web Cheerio. + +## 1. Crawl plusieurs pages + +1. Sélectionner`Web Crawl`ou`Scrape XML Sitemap`Dans ** Get Relative Links Method **. +2. Saisir`0`Dans ** Obtenez des liens relatifs Limite ** Pour récupérer tous les liens disponibles à partir de l'URL fournie. + +
+ +### Gérer les liens (facultatif) + +1. Entrée URL souhaitée à ramper. +2. Cliquez sur ** Répondre aux liens ** Pour récupérer les liens en fonction des entrées de la méthode ** Get Relative Links ** et ** Obtenir des liens relatifs Limite ** Dans ** Paramètres supplémentaires **. +3. Dans ** Liens rampés ** Section, supprimez les liens indésirables en cliquant sur ** Icône de bac à ordures rouges **. +4. Enfin, cliquez sur ** Enregistrer **. + +
+ +## 2. Upsert + +1. Dans le coin supérieur droit, vous remarquerez un bouton vert: + +
+ +2. Une boîte de dialogue sera affichée qui permettra aux utilisateurs d'infiltrer les données sur PineCone: + +
+ +**Remarque :** Sous le capot, les actions suivantes seront exécutées : + +* Scraper toutes les données HTML à l'aide du Cheerio Web Scraper +* Convertir toutes les données scrapées de HTML en Markdown, puis les diviser en morceaux +* Les morceaux obtenus seront parcourus et convertis en embeddings vectoriels à l'aide d'OpenAI Embeddings +* Les embeddings vectoriels seront upsertés sur Pinecone + +3. Sur la [Pinecone console](https://app.pinecone.io), vous pourrez voir les nouveaux vecteurs qui ont été ajoutés. + +
+ +## 3. Requête + +La question est relativement simple. Une fois que vous avez vérifié que les données ont été déposées à la base de données vectorielle, vous pouvez commencer à poser une question dans le chat: + +
+ +Dans les paramètres supplémentaires de la chaîne QA de récupération conversationnelle, vous pouvez spécifier 2 invites: + +* ** Invite de reformularité: ** Utilisé pour reformuler la question compte tenu de l'historique de conversation passée +* ** Invite de réponse: ** À l'aide de la question reformatique, récupérez le contexte de la base de données vectorielle et renvoyez une réponse finale + +
+ +{% hint style="info" %} +Il est recommandé de spécifier une invite de réponse détaillée. Par exemple, vous pouvez spécifier le nom de l'IA, la langue de réponse, ainsi que la réponse à donner lorsque la réponse n'est pas trouvée (pour empêcher l'hallucination). +{% endhint %} + +Vous pouvez également activer l'option Return Source Documents pour renvoyer la liste des morceaux de document d'où provient la réponse de l'IA. + +
+ +## Stracage Web supplémentaire + +Outre le grattoir Web Cheerio, il existe également d'autres nœuds qui peuvent également effectuer le grattage Web: + +* ** Puppeneer: ** Puppeteer est une bibliothèque Node.js qui fournit une API de haut niveau pour contrôler le chrome ou le chrome sans tête. Vous pouvez utiliser des marionnets pour automatiser les interactions de page Web, notamment l'extraction de données de pages Web dynamiques qui nécessitent un JavaScript. +* ** Playwright: ** Playwright est une bibliothèque Node.js qui fournit une API de haut niveau pour contrôler plusieurs moteurs de navigateur, y compris Chromium, Firefox et WebKit. Vous pouvez utiliser Playwright pour automatiser les interactions de la page Web, notamment l'extraction de données de pages Web dynamiques qui nécessitent un JavaScript. +* ** apify: **[Apify](https://apify.com/)est une plate-forme cloud pour le grattage Web et l'extraction de données, qui fournit un[ecosystem](https://apify.com/store)de plus d'un millier d'applications prêtes à l'emploi appelées _ACTORS_ pour divers cas d'utilisation de grattage Web, d'exploration et d'extraction de données. + +
+ +{% hint style = "info"%} +La même logique peut être appliquée à tous les cas d'utilisation de documents, et non seulement limitées au grattage Web! +{% EndHint%} + +Si vous avez une suggestion sur la façon d'améliorer les performances, nous serions ravis de votre[contribution](broken-reference)! diff --git a/fr/use-cases/webhook-tool.md b/fr/use-cases/webhook-tool.md new file mode 100644 index 00000000..aff6d186 --- /dev/null +++ b/fr/use-cases/webhook-tool.md @@ -0,0 +1,144 @@ +--- +description: Learn how to call a webhook on Make +--- + +# Appeler webhook + +*** + +Ce tutoriel vous guide à travers la création d'un outil personnalisé dans Flowiseai qui appelle un point de terminaison WebHook, passant les paramètres nécessaires dans le corps de la demande. Nous utiliserons[Make.com](https://www.make.com/en)Pour configurer un flux de travail WebHook qui envoie des messages à un canal Discord. + +## Configuration d'un webhook dans Make.com + +1. Inscrivez-vous ou connectez-vous à[Make.com](https://www.make.com/en). +2. Créez un nouveau flux de travail contenant un module ** webhook ** et un module ** Discord **, comme indiqué ci-dessous: + + Exemple de workflow
+ +3. À partir du module ** webhook **, copiez l'URL WebHook: + + webhook url
+ +4. Dans le module ** Discord **, configurez-le pour passer le`message`à partir du corps WebHook comme le message envoyé au canal Discord: + + Discord Module Configuration
+ +5. Cliquez sur ** Exécuter une fois ** pour commencer à écouter les demandes entrantes. +6. Envoyez une demande de poste de test avec le corps JSON suivant: + + ```json + { + "message": "Hello Discord!" + } + ``` + + Envoi de la demande de post
+ +7. En cas de succès, vous verrez le message apparaître dans votre canal Discord: + +
Discord Message
+ +Félicitations! Vous avez réussi à configurer un flux de travail WebHook qui envoie des messages à Discord. 🎉 + +## Création d'un outil Webhook dans Flowiseai + +Ensuite, nous créerons un outil personnalisé dans FlowiSeai pour envoyer des demandes WebHook. + +### Étape 1: Ajouter un nouvel outil + +1. Ouvrez le tableau de bord ** Flowiseai **. +2. Cliquez sur ** Outils **, puis sélectionnez ** Créer **. + + Création de l'outil dans FlowiSeai
+ +3. Remplissez les champs suivants: + +| Champ | Valeur | + |-------|-------| +| ** Nom de l'outil ** |`make_webhook`(doit être dans Snake_case) | +| ** Description de l'outil ** | Utile lorsque vous devez envoyer des messages à Discord | +| ** Icône d'outil Src ** |[Flowise Tool Icon](https://github.com/FlowiseAI/Flowise/assets/26460777/517fdab2-8a6e-4781-b3c8-fb92cc78aa0b) | + +4. Définissez le schéma de saisie ** **: + + Exemple de schéma de saisie
+ +### Étape 2: Ajouter la logique de demande de webhook + +Entrez la fonction JavaScript suivante: + +```javascript +const fetch = require('node-fetch'); +const webhookUrl = 'https://hook.eu1.make.com/abcdef'; +const body = { + "message": $message +}; +const options = { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify(body) +}; +try { + const response = await fetch(webhookUrl, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +} +``` + +5. Cliquez sur ** Ajouter ** pour enregistrer votre outil personnalisé. + + Tool Ajout Confirmation
+ +### Étape 3: Créez un ChatFlow avec l'intégration de webhook + +1. Créez une nouvelle toile et ajoutez les nœuds suivants: + - ** Mémoire de tampon ** + - ** Chatopenai ** + - ** Outil personnalisé ** (Sélectionner`make_webhook`) + - ** Agent de fonction Openai ** + +2. Connectez-les comme indiqué: + + ChatFlow Configuration
+ +3. Enregistrez le ChatFlow et commencez à le tester. + +### Étape 4: Envoi de messages via webook + +Essayez de poser au chatbot une question comme: + +> _ "Comment faire cuire un œuf?" _ + +Ensuite, demandez à l'agent d'envoyer ces informations à Discord: + + Envoi du message via l'agent
+ +Vous devriez voir le message apparaître dans votre canal Discord: + + Message final dans Discord
+ +### Outils de test de webhook alternatifs + +Si vous souhaitez tester les webhooks sans Make.com, envisagez d'utiliser: + +- [Beeceptor](https://beeceptor.com)- Configurez rapidement un point de terminaison API simulé. +- [Webhook.site](https://webhook.site)- Inspectez et déboguez les demandes HTTP en temps réel. +- [Pipedream RequestBin](https://pipedream.com/requestbin)- Capturez et analysez les webhooks entrants. + +## Plus de tutoriels + +- Regardez un guide étape par étape sur l'utilisation de webhooks avec des outils personnalisés Flowise: +{% embed url = "https://youtu.be/_k9xjqegnru"%} + +- Apprenez à connecter Flowise aux feuilles Google à l'aide de webhooks: +{% embed url = "https://youtu.be/fehxlDrljfo"%} + +- Apprenez à connecter Flowise à Microsoft Excel à l'aide de webhooks: +{% embed url = "https://youtu.be/cb2gc8jznjc"%} + +En suivant ce guide, vous pouvez déclencher dynamiquement les workhook workflows et étendre l'automatisation à divers services comme Gmail, Google Sheets, etc. diff --git a/fr/using-flowise/README.md b/fr/using-flowise/README.md new file mode 100644 index 00000000..1e09cf04 --- /dev/null +++ b/fr/using-flowise/README.md @@ -0,0 +1,27 @@ +--- +description: Learn about some core functionalities built into Flowise +--- + +# Utilisation de Flowise + +*** + +Cette section fournit des guides approfondis sur les fonctionnalités flomentes de base. 
+ +## Guides + +* [Agentflow V2](agentflowv2.md) +* [Agentflow V1 (Deprecating)](agentflowv1/) + * [Multi-Agents](agentflowv1/multi-agents.md) + * [Sequential Agents](agentflowv1/sequential-agents/) +* [Prediction](prediction.md) +* [Streaming](streaming.md) +* [Document Stores](document-stores.md) +* [Upsertion](upsertion.md) +* [Analytic](broken-reference/) +* [Monitoring](monitoring.md) +* [Embed](embed.md) +* [Uploads](uploads.md) +* [Variables](variables.md) +* [Workspaces](workspaces.md) +* [Evaluations](evaluations.md) diff --git a/fr/using-flowise/agentflowv1/README.md b/fr/using-flowise/agentflowv1/README.md new file mode 100644 index 00000000..ae909556 --- /dev/null +++ b/fr/using-flowise/agentflowv1/README.md @@ -0,0 +1,21 @@ +--- +description: Learn about how to build agentic systems in Flowise +--- + +# Agentflows + +## Introduire des systèmes agentiques en flux + +La section Agentflows de Flowise fournit une plate-forme pour la construction de systèmes basés sur des agents qui peuvent interagir avec des outils et des sources de données externes. + +Actuellement, Flowise propose deux approches pour la conception de ces systèmes:[**Multi-Agents**](#user-content-fn-1)[^1] and [**Sequential Agents**](#user-content-fn-2)[^ 2]. Ces approches fournissent différents niveaux de contrôle et de complexité, vous permettant de choisir le meilleur ajustement pour vos besoins. + +

Flowise app

 + +{% hint style="success" %} +Cette documentation explorera à la fois les approches d'agents séquentiels et multi-agents, expliquant leurs fonctionnalités et comment elles peuvent être utilisées pour construire différents types de flux de travail conversationnels. +{% endhint %} + +[^1]: Les **Multi-Agents**, construits au-dessus de l'architecture d'agents séquentiels, simplifient le processus de constitution et de gestion d'équipes d'agents en pré-configurant les éléments centraux et en fournissant une abstraction de niveau supérieur. + +[^2]: Les **Agents séquentiels** fournissent aux développeurs un accès direct à la structure du flux de travail sous-jacent, permettant un contrôle granulaire sur chaque étape du flux de conversation et offrant une flexibilité maximale pour construire des applications conversationnelles hautement personnalisées. diff --git a/fr/using-flowise/agentflowv1/multi-agents.md b/fr/using-flowise/agentflowv1/multi-agents.md new file mode 100644 index 00000000..bbdea40a --- /dev/null +++ b/fr/using-flowise/agentflowv1/multi-agents.md @@ -0,0 +1,150 @@ +--- +description: Learn how to use Multi-Agents in Flowise, written by @toi500 +--- + +# Multi-agents + +Ce guide vise à fournir une introduction à l'architecture du système d'IA multi-agents dans Flowise, détaillant ses composants, ses contraintes opérationnelles et son flux de travail. + +## Concept + +Analogue à une équipe d'experts du domaine collaborant sur un projet complexe, un système multi-agents utilise le principe de spécialisation dans l'intelligence artificielle. + +Ce système multi-agents utilise un flux de travail hiérarchique et séquentiel, maximisant l'efficacité et la spécialisation. + +### 1. Architecture du système + +Nous pouvons définir l'architecture d'IA multi-agents comme un système d'IA évolutif capable de gérer des projets complexes en les décomposant en sous-tâches gérables. 
+ +Dans Flowise, un système multi-agents comprend deux nœuds ou types d'agents principaux et un utilisateur, interagissant dans un graphique hiérarchique pour traiter les demandes et fournir un résultat ciblé: + +1. ** Utilisateur: ** L'utilisateur agit comme le ** point de départ du système **, fournissant l'entrée ou la demande initiale. Bien qu'un système multi-agents puisse être conçu pour gérer une large gamme de demandes, il est important que ces demandes d'utilisateurs s'alignent sur l'objectif prévu du système. Toute demande tombant en dehors de cette portée peut entraîner des résultats inexacts, des boucles inattendues ou même des erreurs système. Par conséquent, les interactions utilisateur, bien que flexibles, devraient toujours s'aligner sur les fonctionnalités principales du système pour des performances optimales. +2. ** Superviseur AI: ** Le superviseur agit comme l'orchestrateur du Système **, supervisant l'ensemble du flux de travail. Il analyse les demandes de l'utilisateur, les décompose en une séquence de sous-tâches, attribue ces sous-tâches aux agents de travailleurs spécialisés, agrége les résultats et présente finalement la sortie traitée à l'utilisateur. +3. ** Équipe des travailleurs AI: ** Cette équipe se compose d'agents d'IA spécialisés, ou travailleurs, chacun a demandé - via des messages rapides - de gérer une tâche spécifique dans le flux de travail. Ces travailleurs fonctionnent indépendamment, recevant des instructions et des données du superviseur, ** exécutant leurs fonctions spécialisées **, en utilisant des outils selon les besoins et renvoyant les résultats au superviseur. + +
+ +### 2. Contraintes opérationnelles + +Pour maintenir l'ordre et la simplicité, ce système multi-agents fonctionne sous deux contraintes importantes: + +* ** Une tâche à la fois: ** Le superviseur est intentionnellement conçu pour se concentrer sur une seule tâche à la fois. Il attend que le travailleur actif termine sa tâche et renvoie les résultats avant d'analyser l'étape suivante et délégue la tâche suivante. Cela garantit que chaque étape est terminée avec succès avant de passer à autre chose, empêchant la surcomplexité. +* ** Un superviseur par flux: ** Bien qu'il soit théoriquement possible de mettre en œuvre un ensemble de systèmes multi-agents imbriqués pour former une structure hiérarchique plus sophistiquée pour des flux de travail très complexes, ce que Langchain définit comme "[Hierarchical Agent Teams](https://github.com/langchain-ai/langgraph/blob/main/examples/multi\_agent/hierarchical\_agent\_teams.ipynb)", Avec un superviseur de haut niveau et des superviseurs de niveau intermédiaire gérant des équipes de travailleurs, les systèmes multi-agents de Flowise fonctionnent actuellement avec un seul superviseur. + +{% hint style = "info"%} +Ces deux contraintes sont importantes lors de la planification du flux de travail de votre application **. Si vous essayez de concevoir un flux de travail où le superviseur doit déléguer plusieurs tâches simultanément, en parallèle, le système ne pourra pas le gérer et vous rencontrerez une erreur. +{% EndHint%} + +## Le superviseur + +Le superviseur, en tant qu'agent régissant le flux de travail global et responsable de la délégation de tâches au travailleur approprié, nécessite un ensemble de composants pour fonctionner correctement: + +* ** Modèle de chat capable d'appeler de fonction ** pour gérer les complexités de la décomposition des tâches, de la délégation et de l'agrégation de résultats. 
+* ** Mémoire d'agent (facultatif) **: Bien que le superviseur puisse fonctionner sans mémoire d'agent, ce nœud peut améliorer considérablement les workflows qui nécessitent un accès aux états de superviseur passés. Cette ** Préservation de l'État ** pourrait permettre au superviseur de reprendre le travail à partir d'un point spécifique ou de tirer parti des données antérieures pour améliorer la prise de décision. + +
+ +### Invite du superviseur + +Par défaut, l'invite de superviseur est formulée d'une manière qui demande au superviseur d'analyser les demandes des utilisateurs, de les décomposer en une séquence de sous-tâches et d'attribuer ces sous-tâches aux agents de travailleurs spécialisés. + +Bien que l'invite de superviseur soit personnalisable pour répondre aux besoins d'application spécifiques, il nécessite toujours les deux éléments clés suivants: + +* ** La variable {Team \ _Members}: ** Cette variable est cruciale pour la compréhension du superviseur de la main-d'œuvre disponible car elle fournit au superviseur la liste des noms de travailleurs. Cela permet au superviseur de déléguer avec diligence les tâches au travailleur le plus approprié en fonction de son expertise. +* ** Le mot-clé "finition": ** Ce mot clé sert de signal dans l'invite de superviseur. Il indique quand le superviseur doit considérer la tâche terminée et présenter la sortie finale à l'utilisateur. Sans une directive claire "Finish", le superviseur pourrait continuer à déléguer des tâches inutilement ou ne pas fournir un résultat cohérent et finalisé à l'utilisateur. Il signale que tous les sous-tâches nécessaires ont été exécutées et que la demande de l'utilisateur a été réalisée. + +
+ +{% hint style = "info"%} +Il est important de comprendre que le superviseur joue un rôle très distinct des travailleurs. Contrairement aux travailleurs, qui peuvent être adaptés à des instructions très spécifiques, le ** superviseur fonctionne le plus efficacement avec les directives générales, qui lui permettent de planifier et de déléguer des tâches telles qu'elle juge appropriée. ** Si vous êtes nouveau dans les systèmes multi-agents, nous vous recommandons de rester avec l'invite de superviseur par défaut. +{% EndHint%} + +### Comprendre la limite de récursivité dans le nœud du superviseur: + +Ce paramètre restreint la profondeur maximale des appels de fonction imbriqués dans notre application. Dans notre contexte actuel, ** il limite le nombre de fois que le superviseur peut se déclencher dans une seule exécution de workflow **. Ceci est important pour prévenir la récursivité illimitée et garantir que les ressources sont utilisées efficacement. + +
+ +** Contexte: ** Un utilisateur travaillant chez Solterra Renewables souhaite recueillir des informations disponibles sur Evergreen Energy Group, une société de renouvellement renouvelable située au Royaume-Uni, et ciblera son PDG, Amelia Croft, en tant que prospective potentielle. + +** Demande de l'utilisateur: ** L'employé de Solterra Renewables fournit la requête suivante au système multi-agents: "_Je besoin d'informations sur le groupe d'énergie Evergreen et Amelia Croft en tant que nouveau client potentiel pour notre entreprise._" + +1. **Superviseur:** + * Le superviseur reçoit la demande de l'utilisateur et délègue la tâche de "recherche principale" à la`Lead Researcher Worker`. +2. ** Travailleur du chercheur principal: ** + * Le chercheur principal, en utilisant l'outil de recherche Google, recueille des informations sur Evergreen Energy Group, en se concentrant sur: + * Contexte de l'entreprise, industrie, taille et emplacement. + * Actualités et développements récents. + * Les cadres clés, notamment confirmant le rôle d'Amelia Croft en tant que PDG. + * Le chercheur principal renvoie les informations recueillies au`Supervisor`. +3. **Superviseur:** + * Le superviseur reçoit les données de recherche du chercheur principal et confirme qu'Amelia Croft est un exemple pertinent. + * Le superviseur délègue la tâche "générer des e-mails de vente"`Lead Sales Generator Worker`, fournissant: + * Les informations de recherche sur Evergreen Energy Group. + * Email d'Amelia Croft. + * Contexte sur les énergies renouvelables de Solterra. +4. ** Faire un générateur de vente de plomb: ** + * Le travailleur du générateur de vente principale élabore un projet de courrier électronique personnalisé adapté à Amelia Croft, en prenant en compte: + * Son rôle de PDG et la pertinence des services de Solterra Renewables à son entreprise. + * Informations provenant de la recherche sur l'orientation ou les projets actuels de l'Evergreen Energy Group. 
+ * Le travailleur du générateur de ventes de leads renvoie le brouillon de courrier électronique terminé au`Supervisor`. +5. **Superviseur:** + * Le superviseur reçoit le projet de courrier électronique généré et émet la directive "finir". + * Le superviseur récupère le brouillon par e-mail à l'utilisateur, le`Solterra Renewables employee`. +6. ** L'utilisateur reçoit la sortie: ** L'employé de Solterra Renewables reçoit un projet de courrier électronique personnalisé prêt à être examiné et envoyé à Amelia Croft. + +## Tutoriels vidéo + +Ici, vous trouverez une liste de tutoriels vidéo de[Leon's YouTube channel](https://www.youtube.com/@leonvanzyl)montrant comment créer des applications multi-agents dans Flowise à l'aide de non-code. + +{% embed url = "https://www.youtube.com/watch?ab_channel=leonvanzyl&v=284z8k7yjre"%} + +{% embed url = "https://www.youtube.com/watch?ab_channel=leonvanzyl&v=maqco15y-vs"%} + +{% Embed url = "https://www.youtube.com/watch?ab_channel=leonvanzyl&v=EAH7LDGMVES"%} diff --git a/fr/using-flowise/agentflowv1/sequential-agents/README.md b/fr/using-flowise/agentflowv1/sequential-agents/README.md new file mode 100644 index 00000000..b575edd7 --- /dev/null +++ b/fr/using-flowise/agentflowv1/sequential-agents/README.md @@ -0,0 +1,1214 @@ +--- +description: Learn the Fundamentals of Sequential Agents in Flowise, written by @toi500 +--- + +# Agents séquentiels + +Ce guide offre un aperçu complet de l'architecture séquentielle du système d'agent AI dans Flowise, explorant ses composants principaux et ses principes de conception de workflow. + +{% hint style = "avertissement"%} +** Avis de non-responsabilité **: Cette documentation est destinée à aider les utilisateurs à comprendre et à créer des workflows conversationnels à l'aide de l'architecture séquentielle du système d'agent. 
Il n'est pas destiné à être une référence technique complète pour le cadre Langgraph et ne doit pas être interprété comme la définition des normes de l'industrie ou des concepts de langue de base. +{% EndHint%} + +## Concept + +Construit au-dessus de[LangGraph](https://www.langchain.com/langgraph), L'architecture des agents séquentiels de Flowise facilite le ** développement de systèmes agentiques conversationnels en structurant le flux de travail en tant que graphique cyclique dirigé (DCG) **, permettant des boucles contrôlées et des processus itératifs. + +Ce graphique, composé de nœuds interconnectés, définit le flux séquentiel d'informations et d'actions, permettant aux agents de traiter les entrées, d'exécuter des tâches et de générer des réponses de manière structurée. + +
+ +### Comprendre l'architecture DCG des agents séquentiels + +Cette architecture simplifie la gestion des flux de travail conversationnels complexes en définissant une séquence d'opérations claire et compréhensible à travers sa structure DCG. + +Explorons quelques éléments clés de cette approche: + +{% Tabs%} +{% Tab Title = "Core Principles"%} +* ** Traitement basé sur le nœud: ** Chaque nœud du graphique représente une unité de traitement discrète, encapsulant sa propre fonctionnalité comme le traitement du langage, l'exécution d'outils ou la logique conditionnelle. +* ** Le flux de données sous forme de connexions: ** Les bords dans le graphique représentent le flux de données entre les nœuds, où la sortie d'un nœud devient l'entrée pour le nœud suivant, permettant une chaîne d'étapes de traitement. +* ** Gestion de l'État: ** L'état est géré comme un objet partagé, persistant tout au long de la conversation. Cela permet aux nœuds d'accéder aux informations pertinentes à mesure que le flux de travail progresse. +{% endtab%} + +{% tab title = "Terminology"%} +* ** Flux: ** Le mouvement ou la direction des données dans le workflow. Il décrit comment l'information passe entre les nœuds lors d'une conversation. +* ** flux de travail: ** La conception globale et la structure du système. C'est le plan qui définit la séquence des nœuds, leurs connexions et la logique qui orchestre le flux de conversation. +* ** État: ** Une structure de données partagée qui représente l'instantané actuel de la conversation. Il comprend l'histoire de la conversation`state.messages`et toutes les variables d'état personnalisées définies par l'utilisateur. +* ** État personnalisé: ** Paires de valeurs clés définies par l'utilisateur ajoutées à l'objet d'état pour stocker des informations supplémentaires pertinentes pour le flux de travail. 
+* ** Outil: ** Un système, une API ou un service externes qui peuvent être accessibles et exécutés par le workflow pour effectuer des tâches spécifiques, telles que la récupération d'informations, le traitement des données ou l'interaction avec d'autres applications. +* ** Human-in-the-Boop (HITL): ** Une fonctionnalité qui permet une intervention humaine dans le flux de travail, principalement pendant l'exécution de l'outil. Il permet à un examinateur humain d'approuver ou de rejeter un appel d'outil avant son exécution. +* ** Exécution du nœud parallèle: ** Il fait référence à la possibilité d'exécuter plusieurs nœuds simultanément dans un flux de travail en utilisant un mécanisme de ramification. Cela signifie que différentes branches du flux de travail peuvent traiter les informations ou interagir simultanément avec les outils, même si le flux d'exécution global reste séquentiel. +{% endtab%} +{% endtabs%} + +*** + +## Agents séquentiels vs multi-agents + +Alors que les systèmes d'agent multi-agents et séquentiels en flux sont construits sur le cadre Langgraph et partagent les mêmes principes fondamentaux, l'architecture d'agent séquentiel fournit un[lower level of abstraction](#user-content-fn-1)[^ 1], offrant un contrôle plus granulaire sur chaque étape du workflow. + +** Systèmes multi-agents **, qui se caractérisent par une structure hiérarchique avec un agent de superviseur central déléguant des tâches aux agents de travailleurs spécialisés, ** Excel à gérer des flux de travail complexes en les décomposant en sous-tâches gérables **. Cette décomposition en sous-tâches est rendue possible par des éléments de système de base pré-configuration sous le capot, tels que les nœuds de condition, qui nécessiteraient une configuration manuelle dans un système d'agent séquentiel. En conséquence, les utilisateurs peuvent plus facilement constituer et gérer des équipes d'agents. 
+ +En revanche, ** Systèmes d'agent séquentiels ** fonctionnent comme une chaîne de montage rationalisée, où les données circulent séquentiellement à travers une chaîne de nœuds, ce qui les rend idéales pour les tâches exigeant un ordre d'opérations précis et un raffinement incrémentiel de données. Par rapport au système multi-agents, son accès de niveau inférieur à la structure du flux de travail sous-jacent le rend fondamentalement plus ** flexible et personnalisable, offrant une exécution de nœuds parallèles et un contrôle complet sur la logique système **, incorporant les conditions, l'état et les nœuds de boucle dans le flux de travail, permettant la création de nouvelles capacités de ramification dynamique. + +### Présentation des nœuds d'état, de boucle et de condition + +Les agents séquentiels de Flowise offrent de nouvelles capacités pour la création de systèmes conversationnels qui peuvent s'adapter à l'entrée des utilisateurs, prendre des décisions en fonction du contexte et effectuer des tâches itératives. + +Ces capacités sont rendues possibles par l'introduction de quatre nouveaux nœuds principaux; Le nœud d'état, le nœud de boucle et deux nœuds de condition. + +
+ +* ** Node d'état: ** Nous définissons l'état comme une structure de données partagée qui représente l'instantané actuel de notre application ou flux de travail. Le nœud d'état nous permet d'ajouter un état personnalisé ** à notre flux de travail dès le début de la conversation. Cet état personnalisé est accessible et modifiable par d'autres nœuds du workflow, permettant le comportement dynamique et le partage de données. +* ** Node de boucle: ** Ce nœud ** introduit des cycles contrôlés ** dans le flux de travail de l'agent séquentiel, permettant des processus itératifs où une séquence de nœuds peut être répétée en fonction de conditions spécifiques. Cela permet aux agents d'affiner les sorties, de collecter des informations supplémentaires auprès de l'utilisateur ou d'effectuer des tâches plusieurs fois. +* ** Nœuds de condition: ** Le nœud d'agent de condition et de condition fournit le contrôle nécessaire pour ** Créer des flux de conversation complexes avec des chemins de ramification **. Le nœud de condition évalue directement les conditions, tandis que le nœud d'agent de condition utilise le raisonnement d'un agent pour déterminer la logique de ramification. Cela nous permet de guider dynamiquement le comportement du flux en fonction de l'entrée utilisateur, de l'état personnalisé ou des résultats des actions prises par d'autres nœuds. + +### Choisir le bon système + +La sélection du système idéal pour votre application dépend de la compréhension de vos besoins spécifiques de flux de travail. Des facteurs tels que la complexité des tâches, le besoin de traitement parallèle et le niveau de contrôle souhaité sur le flux de données sont tous des considérations clés. 
+ +* ** Pour la simplicité: ** Si votre flux de travail est relativement simple, où les tâches peuvent être accomplies l'une après l'autre et ne nécessitent donc pas une exécution de nœuds parallèles ou un humain dans la boucle (HITL), l'approche multi-agent offre une facilité d'utilisation et une configuration rapide. +* ** Pour la flexibilité: ** Si votre flux de travail a besoin d'une exécution parallèle, de conversations dynamiques, d'une gestion de l'état personnalisé et de la possibilité d'incorporer HITL, l'approche ** Agent séquentiel ** offre la flexibilité et le contrôle nécessaires. + +Voici un tableau comparant les implémentations d'agents multi-agents et séquentielles dans Flowise, mettant en évidence les différences clés et les considérations de conception: + + Multi-agent Agent séquentiel Structure Hierarch ; Le superviseur délégue aux travailleurs spécialisés. linéaire, cyclique et / ou ramification ; Les nœuds se connectent dans une séquence, avec une logique conditionnelle pour la ramification. Film de travail flexible; Conçu pour décomposer une tâche complexe en une séquence de sous-tâches , en a terminé les unes après les autres. Très flexible; prend en charge l'exécution du nœud parallèle , les flux de dialogue complexes, la logique de ramification et les boucles dans un seul tour de conversation. Exécution du nœud parallèle non ; Le superviseur gère une tâche à la fois. Oui ; peut déclencher plusieurs actions en parallèle dans une seule exécution. Gestion de l'état implicite ; L'état est en place, mais n'est pas explicitement géré par le développeur. explicite ; L'état est en place, et les développeurs peuvent définir et gérer un état initial ou personnalisé à l'aide du nœud d'état et du champ "State de mise à jour" dans divers nœuds. Utilisation d'outils Les travailleurs peuvent accéder et utiliser des outils selon les besoins. Les outils sont accessibles et exécutés via Les nœuds . Human-in-the-Loop (HITL) hitl est non pris en charge. 
Prise en charge par le nœud de l'agent et le nœud de l'outil "nécessite l'approbation", permettant une revue humaine et une approbation ou une approbation ou un rejet de l'outil de l'outil de l'outil " exécution. complexité Niveau d'abstraction plus élevé; simplifie la conception du flux de travail. Niveau d'abstraction inférieur; Conception de workflow plus complexe , nécessitant une planification minutieuse des interactions de nœuds, une gestion de l'état personnalisé et une logique conditionnelle. Cas d'utilisation idéale
  • Les processus linéaires automatisés (par exemple, l'extraction des données doivent être achevé Autre.
  • Construire des systèmes de conversation avec des flux dynamiques.
  • Des flux de travail complexes nécessitant une exécution de nœuds parallèles ou une logique de branchement.
  • Situations où la prise de décision est nécessaire à plusieurs points dans la conversation. + +{% hint style = "info"%} +** Remarque **: Même si les systèmes multi-agents sont techniquement une couche de niveau supérieur construite sur l'architecture d'agent séquentiel, ils offrent une expérience utilisateur distincte et une approche de la conception du flux de travail. La comparaison ci-dessus les traite comme des systèmes séparés pour vous aider à sélectionner la meilleure option pour vos besoins spécifiques. +{% EndHint%} + +*** + +## Nœuds d'agents séquentiels + +Les agents séquentiels apportent une toute nouvelle dimension à couler, ** Présentation de 10 nœuds spécialisés **, chacun servant un objectif spécifique, offrant plus de contrôle sur la façon dont nos agents conversationnels interagissent avec les utilisateurs, traitent les informations, prennent des décisions et exécutent des actions. + +Les sections suivantes visent à fournir une compréhension complète des fonctionnalités, des entrées, des sorties et des meilleures pratiques de chaque nœud, vous permettant finalement de créer des flux de travail conversationnels sophistiqués pour une variété d'applications. + +
    + +*** + +## 1. Noeud de démarrage + +Comme son nom l'indique, le nœud de démarrage est le ** point d'entrée pour tous les workflows de l'architecture d'agent séquentiel **. Il reçoit la requête utilisateur initiale, initialise l'état de conversation et définit le flux en mouvement. + +
    + +### Comprendre le nœud de démarrage + +Le nœud de démarrage garantit que nos workflows conversationnels ont la configuration et le contexte nécessaires pour fonctionner correctement. ** Il est responsable de la configuration des fonctionnalités clés ** qui seront utilisées dans le reste du workflow: + +* ** Définition du LLM par défaut: ** Le nœud de démarrage nous oblige à spécifier un modèle de chat (LLM) compatible avec l'appel de fonction, permettant aux agents du workflow d'interagir avec les outils et les systèmes externes. Ce sera le LLM par défaut utilisé sous le capot dans le workflow. +* ** Initialisation de la mémoire: ** Nous pouvons éventuellement connecter un nœud de mémoire d'agent pour stocker et récupérer l'historique de la conversation, permettant plus de réponses de contexte. +* ** Définition d'un état personnalisé: ** Par défaut, l'état contient un immuable`state.messages`Array, qui agit comme la transcription ou l'historique de la conversation entre l'utilisateur et les agents. Le nœud de démarrage vous permet de connecter un état personnalisé au workflow ajoutant un nœud d'état, permettant le stockage d'informations supplémentaires pertinentes à votre flux de travail +* ** Activant la modération: ** éventuellement, nous pouvons connecter la modération d'entrée pour analyser l'entrée de l'utilisateur et empêcher le contenu potentiellement nocif d'être envoyé à la LLM. + +### Entrées + + requis Description Chat Modèle oui le LLM défaut qui alimentera la conversation. Uniquement compatible avec des modèles capables de fonctionner l'appel . Node de mémoire de l'agent Activer la persistance et la préservation du contexte . Nœud non connecter un nœud d'état à définir un état personnalisé , un contexte partagé qui peut être accessible et modifié par d'autres nœuds dans le flux de travail. 
Modération de saisie NO Connectez un NODE de modération pour Détection du texte qui pourrait générer une sortie nocive, empêchant l'empêcher d'être envoyé au llm. + +### Sorties + +Le nœud de démarrage peut se connecter aux nœuds suivants comme sorties: + +* ** Nœud d'agent: ** achemine le flux de conversation vers un nœud d'agent, qui peut ensuite exécuter des actions ou accéder aux outils en fonction du contexte de la conversation. +* ** NODE LLM: ** ITSINE Le flux de conversation vers un nœud LLM pour le traitement et la génération de réponse. +* ** Node d'agent de condition: ** Se connecte à un nœud d'agent de condition pour implémenter la logique de branchement en fonction de l'évaluation de la conversation par l'agent. +* ** Node de condition: ** Se connecte à un nœud de condition pour implémenter la logique de branchement en fonction des conditions prédéfinies. + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Choisissez le bon modèle de chat ** + +Assurez-vous que votre LLM sélectionné prend en charge l'appel de la fonction, une fonctionnalité clé pour activer les interactions agent-outil. De plus, choisissez un LLM qui s'aligne sur la complexité et les exigences de votre application. Vous pouvez remplacer le LLM par défaut en le définissant au niveau du nœud d'agent / LLM / Condition d'agent si nécessaire. + +** Considérez le contexte et la persistance ** + +Si votre cas d'utilisation l'exige, utilisez le nœud de mémoire de l'agent pour maintenir le contexte et personnaliser les interactions. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Sélection du modèle de chat incorrect (LLM) ** + +* ** Problème: ** Le modèle de chat sélectionné dans le nœud de démarrage ne convient pas aux tâches ou capacités prévues du flux de travail, ce qui entraîne de mauvaises performances ou des réponses inexactes. 
+* ** Exemple: ** Un flux de travail nécessite un modèle de chat avec de fortes capacités de résumé, mais le nœud de démarrage sélectionne un modèle optimisé pour la génération de code, conduisant à des résumés inadéquats. +* ** Solution: ** Choisissez un modèle de chat qui s'aligne sur les exigences spécifiques de votre flux de travail. Considérez les forces, les faiblesses du modèle et les types de tâches dans lesquelles il excelle. Reportez-vous à la documentation et expérimentez avec différents modèles pour trouver le meilleur ajustement. + +** Présentation de la configuration du nœud de mémoire de l'agent ** + +* ** Problème: ** Le nœud de mémoire de l'agent n'est pas correctement connecté ou configuré, entraînant la perte de données d'historique de conversation entre les sessions. +* ** Exemple: ** Vous avez l'intention d'utiliser la mémoire persistante pour stocker les préférences des utilisateurs, mais le nœud de mémoire de l'agent n'est pas connecté au nœud de démarrage, ce qui fait réinitialiser les préférences sur chaque nouvelle conversation. +* ** Solution: ** Assurez-vous que le nœud de mémoire de l'agent est connecté au nœud de démarrage et configuré avec la base de données appropriée (SQLite). Pour la plupart des cas d'utilisation, la base de données SQLite par défaut sera suffisante. + +** Modération d'entrée inadéquate ** + +* ** Problème: ** La "modération d'entrée" n'est pas activée ou configurée correctement, permettant à l'entrée utilisateur potentiellement nocive ou inappropriée d'atteindre le LLM et de générer des réponses indésirables. +* ** Exemple: ** Un utilisateur soumet un langage offensant, mais la modération d'entrée ne le détecte pas ou n'est pas du tout configurée, permettant à la requête d'atteindre le LLM. +* ** Solution: ** Ajouter et configurer un nœud de modération d'entrée dans le nœud de démarrage pour filtrer le langage potentiellement nocif ou inapproprié. 
Personnalisez les paramètres de modération afin de les aligner sur vos exigences et cas d'utilisation spécifiques. +{% endtab %} +{% endtabs %} + +## 2. Nœud de mémoire de l'agent + +Le nœud de mémoire de l'agent **fournit un mécanisme de stockage de mémoire persistant**, permettant au flux de travail d'agents séquentiels de conserver l'historique de la conversation (`state.messages`) ainsi que tout état personnalisé précédemment défini sur plusieurs interactions. + +Cette mémoire à long terme est essentielle pour que les agents apprennent des interactions précédentes, maintiennent le contexte au fil de conversations étendues et fournissent des réponses plus pertinentes. + +
    + +### Où les données sont enregistrées + +Par défaut, Flowise utilise sa ** Base de données SQLite ** ** pour stocker l'historique de la conversation et les données d'état personnalisées, créant un tableau "** Checkpoints **" pour gérer ces informations persistantes. + +#### Comprendre la structure et le format de données des "points de contrôle" + +Ce tableau ** stocke des instantanés de l'état du système à différents moments lors d'une conversation **, permettant la persistance et la récupération de l'histoire de la conversation. Chaque ligne représente un point ou un «point de contrôle» spécifique dans l'exécution du workflow. + +
    + +#### Structure de table + +* ** Thread \ _id: ** Un identifiant unique représentant une session de conversation spécifique, ** Notre ID de session **. Il regroupe tous les points de contrôle liés à une seule exécution de workflow. +* ** Checkpoint \ _id: ** Un identifiant unique pour chaque étape d'exécution (exécution du nœud) dans le workflow. Il aide à suivre l'ordre des opérations et à identifier l'état à chaque étape. +* ** Parent \ _id: ** Indique le point de contrôle \ _id de l'étape d'exécution précédente qui a conduit au point de contrôle actuel. Cela établit une relation hiérarchique entre les points de contrôle, permettant la reconstruction du flux d'exécution du flux de travail. +* ** Checkpoint: ** Contient une chaîne JSON représentant l'état actuel du flux de travail à ce point de contrôle spécifique. Cela inclut les valeurs des variables, les messages échangés et toutes les autres données pertinentes capturées à ce stade de l'exécution. +* ** Metadata: ** fournit un contexte supplémentaire sur le point de contrôle, spécifiquement lié aux opérations de nœud. + +#### Comment ça marche + +Au fur et à mesure qu'un flux de travail d'agent séquentiel s'exécute, le système enregistre un point de contrôle dans ce tableau pour chaque étape significative. Ce mécanisme offre plusieurs avantages: + +* ** Suivi d'exécution: ** Les points de contrôle permettent au système de comprendre le chemin d'exécution et l'ordre des opérations dans le workflow. +* ** Gestion de l'État: ** Les points de contrôle stockent l'état du flux de travail à chaque étape, y compris les valeurs variables, l'historique de conversation et toutes les autres données pertinentes. Cela permet au système de maintenir une conscience contextuelle et de prendre des décisions éclairées en fonction de l'état actuel. 
+* ** Resomption du flux de travail: ** Si le workflow est interrompu ou interrompu (par exemple, en raison d'une erreur système ou d'une demande utilisateur), le système peut utiliser les points de contrôle stockés pour reprendre l'exécution du dernier état enregistré. Cela garantit que la conversation ou la tâche se poursuit à partir de son séjour, préservant les progrès de l'utilisateur et empêchant la perte de données. + +### ** Entrées ** + +Le nœud de mémoire de l'agent n'a ** pas de connexions d'entrée spécifiques **. + +### Configuration du nœud + + requis Description database oui le type de base de base de données pour les histoires de conversation. Actuellement, seul sqlite est pris en charge . + +### Paramètres supplémentaires + + requis Description Le fichier pathle non le trajet du fichier du fichier de données SQLite. Si cela n'est pas fourni, le système utilisera un emplacement par défaut . + +### ** Sorties ** + +Le nœud de mémoire de l'agent interagit uniquement avec le nœud ** de démarrage **, rendant l'historique de conversation disponible à partir du tout début du workflow. + +### ** meilleures pratiques ** + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Utilisation stratégique ** + +Utilisez la mémoire d'agent uniquement lorsque cela est nécessaire. Pour les interactions simples et apatrides, cela pourrait être exagéré. Réservez-le pour des scénarios où le conservation des informations à travers les virages ou les séances est essentiel. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Average inutile ** + +* ** Le problème: ** Utilisation de la mémoire d'agent pour chaque interaction, même lorsqu'il n'est pas nécessaire, introduit un stockage et un traitement inutiles. Cela peut ralentir les temps de réponse et augmenter la consommation de ressources. +* ** Exemple: ** Un chatbot météo simple qui fournit des informations basées sur une seule demande d'utilisateur n'a pas besoin de stocker l'historique des conversations. 
+* **Solution :** Analysez les exigences de votre système et utilisez la mémoire d'agent lorsque le stockage persistant des données est essentiel à la fonctionnalité ou à l'expérience utilisateur. +{% endtab %} +{% endtabs %} + +*** + +## 3. Nœud d'état + +Le nœud d'état, qui ne peut être connecté qu'au nœud de démarrage, **fournit un mécanisme pour définir un état personnalisé** dans notre flux de travail dès le début de la conversation. Cet état personnalisé est un objet JSON partagé, qui peut être mis à jour par les nœuds du graphe et qui est transmis d'un nœud à l'autre au fur et à mesure que le flux progresse. + +
    + +### Comprendre le nœud d'État + +Par défaut, l'état comprend un`state.messages`Array, qui agit comme notre histoire de conversation. Ce tableau stocke tous les messages échangés entre l'utilisateur et les agents, ou tout autre acteur du flux de travail, en le préservant tout au long de l'exécution du workflow. + +Car par définition`state.messages`Le tableau est immuable et ne peut pas être modifié, ** Le but du nœud d'état est de nous permettre de définir les paires de valeurs clés personnalisées **, élargissant l'objet d'état pour contenir toute information supplémentaire pertinente pour notre flux de travail. + +{% hint style = "info"%} +Lorsqu'aucun nœud de mémoire d'agent ** n'est utilisé, l'état fonctionne en mémoire et n'est pas persisté pour une utilisation future. +{% EndHint%} + +### Entrées + +Le nœud d'état n'a ** pas de connexions d'entrée spécifiques **. + +### Sorties + +Le nœud d'état ne peut se connecter qu'au ** Node de démarrage **, permettant la configuration d'un état personnalisé depuis le début du flux de travail et permettant à d'autres nœuds d'accéder et potentiellement de modifier cet état personnalisé partagé. + +### Paramètres supplémentaires + + requis Description État personnalisé Oui un objet JSON représentant l'état de personnalité . Cet objet peut contenir toutes les paires de valeurs de clé pertinentes pour l'application. + +### Comment définir un état personnalisé + +Spécifiez la clé ** **, ** Type d'opération ** et ** Valeur par défaut ** pour l'objet d'état. Le type d'opération peut être "remplacer" ou "ajouter". + +* **Remplacer** + 1. Remplacez la valeur existante par la nouvelle valeur. + 2. Si la nouvelle valeur est nul, la valeur existante sera conservée. +* **Ajouter** + 1. Ajoutez la nouvelle valeur à la valeur existante. + 2. Les valeurs par défaut peuvent être vides ou un tableau. Ex: \ ["a", "b"] + 3. La valeur finale est un tableau. 
+ +#### Exemple utilisant JS + +{% code overflow = "wrap"%} +```javascript +{ + aggregate: { + value: (x, y) => x.concat(y), // here we append the new message to the existing messages + default: () => [] + } +} +``` +{% Endcode%} + +#### Exemple à l'aide de la table + +Pour définir un état personnalisé à l'aide de l'interface de table dans le nœud d'état, suivez ces étapes: + +1. ** Ajouter l'élément: ** Cliquez sur le bouton "+ Ajouter l'élément" pour ajouter des lignes au tableau. Chaque ligne représente une paire de valeurs clés dans votre état personnalisé. +2. ** Spécifiez les touches: ** Dans la colonne "clé", entrez le nom de chaque clé que vous souhaitez définir dans votre objet d'état. Par exemple, vous pourriez avoir des clés comme "nom d'utilisateur", "userLocation", etc. +3. ** Choisissez les opérations: ** Dans la colonne "Opération", sélectionnez l'opération souhaitée pour chaque clé. Vous avez deux options: + * ** Remplacer: ** Cela remplacera la valeur existante de la touche par la nouvelle valeur fournie par un nœud. Si la nouvelle valeur est nul, la valeur existante sera conservée. + * ** Ajouter: ** Cela ajoutera la nouvelle valeur à la valeur existante de la clé. La valeur finale sera un tableau. +4. ** Définissez les valeurs par défaut: ** Dans la colonne "Valeur par défaut", entrez la valeur initiale pour chaque touche. Cette valeur sera utilisée si aucun autre nœud ne fournit une valeur pour la clé. La valeur par défaut peut être vide ou un tableau. + +#### Exemple de table + +| Clé | Opération | Valeur par défaut | +| -------- | --------- | ------------- | +| Nom d'utilisateur | Remplacer | NULL | + +
    + +1. Ce tableau définit une clé à l'état personnalisé:`userName`. +2. Le`userName`La clé utilisera l'opération "Remplacer", ce qui signifie que sa valeur sera mise à jour chaque fois qu'un nœud fournit une nouvelle valeur. +3. Le`userName`Key a une valeur par défaut de _null, _ indiquant qu'il n'a pas de valeur initiale. + +{% hint style = "info"%} +N'oubliez pas que cette approche basée sur une table est une alternative à la définition de l'état personnalisé à l'aide de JavaScript. Les deux méthodes obtiennent le même résultat. +{% EndHint%} + +#### Exemple utilisant l'API + +```json +{ + "question": "hello", + "overrideConfig": { + "stateMemory": [ + { + "Key": "userName", + "Operation": "Replace", + "Default Value": "somevalue" + } + ] + } +} +``` + +### Meilleures pratiques + +{% Tabs%} +{% tab title = "pro-tips"%} +** Planifiez votre structure d'état personnalisée ** + +Avant de construire votre flux de travail, concevez la structure de votre état personnalisé. Un état personnalisé bien organisé rendra votre flux de travail plus facile à comprendre, à gérer et à déboguer. + +** Utilisez des noms de clés significatifs ** + +Choisissez des noms de clés descriptifs et cohérents qui indiquent clairement l'objectif des données qu'ils détiennent. Cela améliorera la lisibilité de votre code et facilitera que les autres (ou vous à l'avenir) comprennent comment l'état personnalisé est utilisé. + +** Gardez l'état personnalisé minimal ** + +Stockez uniquement les informations dans l'état personnalisé qui est essentielle à la logique et à la prise de décision du workflow. + +** Considérons la persistance de l'État ** + +Si vous devez préserver l'état sur plusieurs sessions de conversation (par exemple, pour les préférences des utilisateurs, l'historique des commandes, etc.), utilisez le nœud de mémoire de l'agent pour stocker l'état dans une base de données persistante. 
+{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Mises à jour d'état incohérentes ** + +* ** Problème: ** La mise à jour de l'état personnalisé en plusieurs nœuds sans stratégie claire peut entraîner des incohérences et un comportement inattendu. +* **Exemple** + 1. Mises à jour de l'agent 1`orderStatus`au "paiement confirmé". + 2. L'agent 2, dans une succursale différente, mises à jour`orderStatus`pour "commander complet" sans vérifier l'état précédent. +* ** Solution: ** Utiliser les nœuds des conditions pour contrôler le flux des mises à jour de l'état personnalisé et s'assurer que les transitions d'état personnalisées se produisent de manière logique et cohérente. +{% endtab%} +{% endtabs%} + +*** + +## 4. Node d'agent + +Le nœud d'agent est un composant Core ** de l'architecture d'agent séquentiel. ** Il agit comme un décideur et un orchestrateur dans notre flux de travail. + +
    + +### Comprendre le nœud d'agent + +En recevant les commentaires des nœuds précédents, qui comprend toujours l'historique complet de la conversation`state.messages`Et tout état personnalisé à ce stade de l'exécution, le nœud d'agent utilise sa "personnalité" définie, établie par l'invite du système, pour déterminer si des outils externes sont nécessaires pour répondre à la demande de l'utilisateur. + +* Si des outils sont nécessaires, le nœud d'agent sélectionne et exécute de manière autonome l'outil approprié. Cette exécution peut être automatique ou, pour les tâches sensibles, nécessite l'approbation humaine (HITL) avant de continuer. Une fois l'outil terminé son fonctionnement, le nœud d'agent reçoit les résultats, les traite à l'aide du modèle de chat désigné (LLM) et génère une réponse complète. +* Dans les cas où aucun outil n'est nécessaire, le nœud d'agent exploite directement le modèle de chat (LLM) pour formuler une réponse basée sur le contexte de conversation actuel. + +### Entrées + + requis Description Les outils externes no offrent le nœud d'agent avec accès à une suite d'outils d'EXTERIAL . Pour effectuer des actions et récupérer des informations. Modèle de chat non Ajoutez un nouveau modèle de chat à écraser le modèle de chat par défaut (LLM) du flux de travail. Compatible uniquement avec des modèles capables d'appels de fonction. Démarrer le nœud oui reçoit le Entrée utilisateur initial , ainsi que l'état personnalisé (IF Set Up) et le reste de l'état par défaut . Node. Condition Node Oui reçoit les entrées à partir d'un nœud de condition précédente, permettant au nœud d'agent de prendre des actions ou de guider la conversation en fonction du résultat de l'état de la condition du nœud . Node Oui reçoit les entrées d'un nœud d'agent de condition précédente, permettant au nœud de l'agent prendre des actions ou guider la conversation en fonction du résultat de l'évaluation de l'agent d'agent . Agent d'agent d'agent . 
Node Oui reçoit les entrées d'un nœud d'agent précédent, Activé des actions d'agent chaîné et en maintenant le contexte de conversation LLM NODE Oui Permettre au nœud de l'agent traiter la réponse de la LLM . Node d'outil Oui reçoit la sortie d'un nœud d'outil, permettant au nœud d'agent de traiter et d'intégrer les sorties de l'outil dans son son Réponse . + +{% hint style = "info"%} +Le nœud d'agent ** nécessite au moins une connexion à partir des nœuds suivants **: nœud de démarrage, nœud d'agent, nœud de condition, nœud d'agent de condition, nœud LLM ou nœud d'outil. +{% EndHint%} + +### Sorties + +Le nœud d'agent peut se connecter aux nœuds suivants comme sorties: + +* ** Node d'agent: ** passe le contrôle à un nœud d'agent ultérieur, permettant le chaînage de plusieurs actions d'agent dans un workflow. Cela permet des flux conversationnels plus complexes et une orchestration des tâches. +* ** Node LLM: ** passe la sortie de l'agent à un nœud LLM, permettant un traitement linguistique, une génération de réponse ou une prise de décision supplémentaires en fonction des actions et des informations de l'agent. +* ** Node d'agent de condition: ** Dirige le flux vers un nœud d'agent de condition. Ce nœud évalue la sortie du nœud d'agent et ses conditions prédéfinies pour déterminer l'étape suivante appropriée dans le flux de travail. +* ** Node de condition: ** Similaire au nœud d'agent de condition, le nœud de condition utilise des conditions prédéfinies pour évaluer la sortie du nœud d'agent, dirigeant le flux le long de différentes branches en fonction du résultat. +* ** Node de fin: ** conclut le flux de conversation. +* ** Node de boucle: ** Redirige le flux vers un nœud précédent, permettant des processus itératifs ou cycliques dans le flux de travail. Ceci est utile pour les tâches qui nécessitent plusieurs étapes ou impliquent de raffiner les résultats en fonction des interactions précédentes. 
Par exemple, vous pouvez remonter à un nœud d'agent antérieur ou à un nœud LLM pour recueillir des informations supplémentaires ou affiner le flux de conversation en fonction de la sortie du nœud d'agent actuel. + +### Configuration du nœud + + requis Description Nom de l'agent Oui ciblez-le lorsque vous utilisez des boucles dans le workflow. invite du système non définit le `` personne '' de l'agent et guide son comportement . Par exemple, " vous êtes un agent de service client spécialisé dans le support technique [...]." Exiger l'approbation non active la fonction Human-in-the-Loop (HITL). Si défini sur ' true ,' le nœud d'agent demandera l'approbation humaine avant d'exécuter un outil. Ceci est particulièrement utile pour les opérations sensibles ou lorsque la surveillance humaine est souhaitée. Par défaut est ' false ,' permettant au nœud d'agent d'exécuter des outils de manière autonome. + +### Paramètres supplémentaires + + requis Description invite humain non Cette invite est en guise State.Messages. Il nous permet d'injecter un message de type humain dans le flux de conversation après que le nœud d'agent a traité sa contribution et avant que le nœud suivant ne reçoive la sortie du nœud d'agent. Invite d'approbation Non Une invite personnalisable présentée à l'Ensembleur humain lorsque la surface Hitl est active. Cette invite fournit un contexte sur l'exécution de l'outil, y compris le nom et le but de l'outil. La variable {outils} dans l'invite sera remplacée dynamiquement par la liste réelle des outils suggérés par l'agent, en veillant à ce que le réviseur humain ait toutes les informations nécessaires pour prendre une décision éclairée. Interface hitl. Cela permet d'adapter le langage au contexte spécifique et d'assurer la clarté du réviseur humain. Rejeter le texte du bouton Non Personnalise le texte affiché sur le bouton pour rejeter l'exécution de l'outil dans l'interface hitl. 
Comme le texte du bouton Approuver, cette personnalisation améliore la clarté et fournit une action claire à prendre pour le réviseur humain s'il jugera l'exécution de l'outil inutile ou potentiellement nocif. État de mise à jour non fournit un mécanisme pour modifier l'objet d'état personnalisé partagé dans le cadre du workflow . Ceci est utile pour stocker les informations recueillies par l'agent ou influencer le comportement des nœuds suivants. MAX IDERATER NON Limite le nombre d'itérations Un nœud d'agent peut faire dans une seule exécution de workfwing. + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Clear System Invite ** + +Créez une invite de système concise et sans ambiguïté qui reflète avec précision le rôle et les capacités de l'agent. Cela guide la prise de décision de l'agent et garantit qu'elle agit dans sa portée définie. + +** Sélection d'outils stratégiques ** + +Choisissez et configurez les outils disponibles pour le nœud d'agent, en vous assurant qu'ils s'alignent avec l'objectif de l'agent et les objectifs globaux du flux de travail. + +** Hitl pour les tâches sensibles ** + +Utilisez l'option «exiger l'approbation» pour les tâches impliquant des données sensibles, nécessitant un jugement humain ou comporte un risque de conséquences imprévues. + +** Tirez parti des mises à jour d'état personnalisés ** + +Mettez à jour l'objet d'état personnalisé stratégiquement pour stocker les informations recueillies ou influencer le comportement des nœuds en aval. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Inaction de l'agent en raison de la surcharge d'outils ** + +* ** Problème: ** Lorsqu'un nœud d'agent a accès à un grand nombre d'outils dans une seule exécution du flux de travail, il peut avoir du mal à décider quel outil est le plus approprié à utiliser, même lorsqu'un outil est clairement nécessaire. Cela peut conduire à l'agent à ne pas appeler un outil, ce qui entraîne des réponses incomplètes ou inexactes. 
+* ** Exemple: ** Imaginez un agent de support client conçu pour gérer un large éventail de demandes. Vous l'avez équipé d'outils pour le suivi des commandes, les informations de facturation, les rendements des produits, le support technique, etc. Un utilisateur demande: "Quel est le statut de ma commande?" Mais l'agent, submergé par le nombre d'outils potentiels, répond par une réponse générique comme: "Je peux vous aider. Quel est votre numéro de commande?" sans réellement utiliser l'outil de suivi des commandes. +* **Solution** + 1. ** Affiner les invites du système: ** Fournissez des instructions et des exemples plus clairs dans l'invite du système du nœud d'agent pour le guider vers la bonne sélection d'outils. Si nécessaire, soulignez les capacités spécifiques de chaque outil et les situations dans lesquelles elles doivent être utilisées. + 2. ** Limitez les choix d'outils par nœud: ** Si possible, décomposez les workflows complexes en segments plus petits et plus gérables, chacun avec un ensemble d'outils plus ciblés. Cela peut aider à réduire la charge cognitive sur l'agent et à améliorer sa précision de sélection d'outils. + +** négligeant Hitl pour les tâches sensibles ** + +* ** Problème: ** Le fait de ne pas utiliser la fonctionnalité "exiger l'approbation" du nœud d'agent (HITL) pour les tâches impliquant des informations sensibles, des décisions critiques ou des actions avec des conséquences potentielles du monde réel peut entraîner des résultats involontaires ou des dommages à la confiance des utilisateurs. +* ** Exemple: ** Votre agent de réservation de voyage a accès aux informations de paiement d'un utilisateur et peut réserver automatiquement les vols et les hôtels. Sans HITL, une mauvaise interprétation de l'intention de l'utilisateur ou une erreur dans la compréhension de l'agent pourrait entraîner une réservation incorrecte ou une utilisation non autorisée des détails de paiement de l'utilisateur. +* **Solution** + 1. 
** Identifier les actions sensibles: ** Analyser votre flux de travail et identifier toutes les actions qui impliquent l'accès ou le traitement des données sensibles (par exemple, les informations de paiement, les informations personnelles). + 2. ** Implémentez "Require approbation": ** Pour ces actions sensibles, activez l'option "Exiger l'approbation" dans le nœud de l'agent. Cela garantit qu'un humain passe en revue l'action proposée par l'agent et le contexte pertinent avant que toute donnée sensible ne soit accessible ou que toute action irréversible soit prise. + 3. ** Concevoir des invites d'approbation claire: ** Fournir des invites claires et concises pour les examinateurs humains, résumant l'intention de l'agent, l'action proposée et les informations pertinentes nécessaires pour que le réviseur prenne une décision éclairée. + +** Invite du système peu claire ou incomplète ** + +* ** Problème: ** L'invite système fournie au nœud d'agent n'a pas la spécificité et le contexte nécessaires pour guider efficacement l'agent dans l'exécution de ses tâches prévues. Une invite vague ou trop générale peut entraîner des réponses non pertinentes, des difficultés à comprendre l'intention des utilisateurs et une incapacité à tirer parti des outils ou des données de manière appropriée. +* ** Exemple: ** Vous construisez un agent de réservation de voyage, et votre invite système déclare simplement "_ Vous êtes un assistant d'IA utile." Il manque les instructions et le contexte spécifiques nécessaires pour que l'agent guide efficacement les utilisateurs à travers les recherches de vol, les réservations d'hôtels et la planification d'itinéraire. +* ** Solution: ** Élaborez une invite de système détaillée et contextuelle: + +{% code overflow = "wrap"%} +``` +You are a travel booking agent. Your primary goal is to assist users in planning and booking their trips. +- Guide them through searching for flights, finding accommodations, and exploring destinations. 
+- Be polite, patient, and offer travel recommendations based on their preferences. +- Utilize available tools to access flight data, hotel availability, and destination information. +``` +{% Endcode%} +{% endtab%} +{% endtabs%} + +*** + +## 5. Node LLM + +Comme le nœud d'agent, le nœud LLM est un composant Core ** de l'architecture d'agent séquentiel **. Les deux nœuds utilisent les mêmes modèles de chat (LLMS) par défaut, fournissant les mêmes capacités de traitement du langage de base, mais le nœud LLM se distingue dans ces domaines clés. + +
    + +### Avantages clés du nœud LLM + +Tandis qu'une comparaison détaillée entre le nœud LLM et le nœud d'agent est disponible en[this section](./#agent-node-vs.-llm-node-selecting-the-optimal-node-for-conversational-tasks), voici un bref aperçu des principaux avantages du nœud ** **: + +* ** Données structurées: ** Le nœud LLM fournit une fonctionnalité dédiée pour définir un schéma JSON pour sa sortie. Cela rend exceptionnellement facile d'extraire des informations structurées des réponses de la LLM et de transmettre ces données aux nœuds conséquents dans le flux de travail. Le nœud d'agent n'a pas cette fonction de schéma JSON intégré +* ** HITL: ** Bien que les deux nœuds prennent en charge Hitl pour l'exécution de l'outil, le nœud LLM délève ce contrôle au nœud d'outil lui-même, offrant plus de flexibilité dans la conception du flux de travail. + +### Entrées + + requis Description Modèle de chat flux de travail. Compatible uniquement avec des modèles capables d'appels de fonction. Démarrer le nœud oui reçoit le Entrée utilisateur initial , ainsi que l'état personnalisé (IF Set Up) et le reste de l'état par défaut . Node. Le nœud d'agent oui reçoit la sortie d'un nœud d'agent, qui peut inclure les résultats de l'exécution d'outils ou les réponses générées par l'agent. Condition des réponses. Node Oui reçoit les entrées d'un nœud de condition précédente, permettant au nœud LLM de prendre des actions ou de guider la conversation en fonction du résultat de l'évaluation de l'état de condition . Condition d'agent de condition d'agent de condition . Node Oui reçoit les entrées d'un nœud d'agent de condition précédente, permettant au nœud LLM de prendre des actions ou de guider la conversation en fonction du résultat de l'évaluation de l'agent de condition . LLM de l'agent . Node oui reçoit la sortie d'un autre nœud LLM, activant le raisonnement chaîné ou le traitement de l'information sur plusieurs nœuds LLM. 
Node d'outil oui Fournir les résultats de l'exécution des outils pour un traitement ultérieur ou la génération de réponse. + +{% hint style = "info"%} +Le nœud ** llm nécessite au moins une connexion à partir des nœuds suivants **: nœud de démarrage, nœud d'agent, nœud de condition, nœud d'agent de condition, nœud LLM ou nœud d'outil. +{% EndHint%} + +### ** Configuration du nœud ** + + requis Description llm Name oui Ajouter un nom de description à la llm NODI lisibilité et facilement ciblez-la lorsque vous utilisez des boucles dans le workflow. + +### Sorties + +Le nœud LLM peut se connecter aux nœuds suivants comme sorties: + +* ** Node d'agent: ** passe la sortie de LLM vers un nœud d'agent, qui peut ensuite utiliser les informations pour décider des actions, exécuter des outils ou guider le flux de conversation. +* ** Node LLM: ** passe la sortie à un nœud LLM ultérieur, permettant le chaînage de plusieurs opérations LLM. Ceci est utile pour les tâches telles que le raffinage de la génération de texte, la réalisation de plusieurs analyses ou la rupture du traitement du langage complexe en étapes. +* ** Node d'outil **: passe la sortie vers un nœud d'outil, permettant l'exécution d'un outil spécifique basé sur les instructions du nœud LLM. +* ** Node d'agent de condition: ** Dirige le flux vers un nœud d'agent de condition. Ce nœud évalue la sortie du nœud LLM et ses conditions prédéfinies pour déterminer l'étape suivante appropriée du flux de travail. +* ** Node de condition: ** Similaire au nœud de l'agent de condition, le nœud de condition utilise des conditions prédéfinies pour évaluer la sortie du nœud LLM, dirigeant le flux le long de différentes branches en fonction du résultat. +* ** Node de fin: ** conclut le flux de conversation. +* ** Node de boucle: ** Redirige le flux vers un nœud précédent, permettant des processus itératifs ou cycliques dans le flux de travail. Cela pourrait être utilisé pour affiner la sortie du LLM sur plusieurs itérations. 
+ +### Paramètres supplémentaires + + requis Description Système invite non définit le agent "de" Personne "et les guides de son comportement . Par exemple, " vous êtes un agent de service client spécialisé dans la prise en charge technique [...]." Invite humain non Cette invite est ajoutée à la state.Messages Array comme message humain. Il nous permet à injecter un message de type humain dans le flux de conversation après que le nœud LLM a traité ses entrées et avant que le nœud suivant ne reçoive la sortie structurée du nœud LLM non Schema (clé, type, valeurs d'énumération, description). State de mise à jour Non fournit un mécanisme pour modifier l'objet à état personnalisé partagé dans le flux de travail . Ceci est utile pour stocker des informations recueillies par le nœud LLM ou influencer le comportement des nœuds suivants. + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Clear System Invite ** + +Crégurez une invite de système concise et sans ambiguïté qui reflète avec précision le rôle et les capacités du nœud LLM. Cela guide la prise de décision du nœud LLM et garantit qu'il agit dans sa portée définie. + +** Optimiser pour la sortie structurée ** + +Gardez vos schémas JSON le plus simple possible, en nous concentrant sur les éléments de données essentiels. Activez uniquement la sortie structurée JSON lorsque vous devez extraire des points de données spécifiques de la réponse de LLM ou lorsque les nœuds en aval nécessitent une entrée JSON. + +** Sélection d'outils stratégiques ** + +Choisissez et configurez les outils disponibles pour le nœud LLM (via le nœud de l'outil), en veillant à ce qu'ils s'alignent sur l'objectif de l'application et les objectifs globaux du flux de travail. + +** Hitl pour les tâches sensibles ** + +Utilisez l'option «exiger l'approbation» pour les tâches impliquant des données sensibles, nécessitant un jugement humain ou comporte un risque de conséquences imprévues. 
+ +** Tirez parti des mises à jour d'état ** + +Mettez à jour l'objet d'état personnalisé stratégiquement pour stocker les informations recueillies ou influencer le comportement des nœuds en aval. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Exécution d'outils involontaire en raison d'une configuration HITL incorrecte ** + +* ** Problème: ** Bien que le nœud LLM puisse déclencher des nœuds d'outil, il s'appuie sur la configuration du nœud d'outil pour l'approbation de l'homme dans la boucle (HITL). Le fait de ne pas configurer correctement HITL pour des actions sensibles peut entraîner l'exécution d'outils sans examen humain, provoquant potentiellement des conséquences imprévues. +* ** Exemple: ** Votre nœud LLM est conçu pour interagir avec un outil qui apporte des modifications aux données de l'utilisateur. Vous avez l'intention d'avoir un examen humain de ces modifications avant l'exécution, mais l'option "Require Approbation" du nœud d'outil connecté n'est pas activée. Cela pourrait entraîner l'outil modifiant automatiquement les données utilisateur basées uniquement sur la sortie du LLM, sans aucune surveillance humaine. +* **Solution** + 1. ** Paramètres du nœud d'outil à double vérification: ** Assurez-vous toujours que l'option "Require approbation" est activée dans les paramètres de tout nœud d'outil qui gère les actions sensibles. + 2. ** Testez soigneusement HITL: ** Avant de déployer votre flux de travail, testez le processus HITL pour vous assurer que les étapes d'examen humain sont déclenchées comme prévu et que le mécanisme d'approbation / rejet fonctionne correctement. + +** Surutilisation ou malentendu de la sortie structurée JSON ** + +* ** Problème: ** Bien que la fonction de sortie structurée JSON du nœud LLM soit puissante, il t'utilise ou ne comprend pas pleinement que ses implications peuvent entraîner des erreurs de données. 
+* ** Exemple: ** Vous définissez un schéma JSON complexe pour la sortie du nœud LLM, même si les tâches en aval ne nécessitent qu'une réponse texte simple. Cela ajoute une complexité inutile et rend votre flux de travail plus difficile à comprendre et à entretenir. De plus, si la sortie de la LLM ne se conforme pas au schéma défini, il peut provoquer des erreurs dans les nœuds suivants. +* **Solution** + 1. ** Utilisez la sortie JSON stratégiquement: ** Activez uniquement la sortie structurée JSON lorsque vous avez un besoin clair d'extraire des points de données spécifiques de la réponse de LLM ou lorsque les nœuds d'outil en aval nécessitent une entrée JSON. + 2. ** Gardez les schémas simples: ** Concevez vos schémas JSON comme simples et concis que possible, en nous concentrant uniquement sur les éléments de données qui sont absolument nécessaires à la tâche. +{% endtab%} +{% endtabs%} + +*** + +## 6. Nœud d'outil + +Le nœud d'outil est un composant précieux du système d'agent séquentiel de Flowise, ** permettant l'intégration et l'exécution d'outils externes ** dans les flux de travail conversationnels. Il agit comme un pont entre le traitement basé sur le langage des nœuds LLM et les fonctionnalités spécialisées des outils, API ou services externes. + +
    + +### Comprendre le nœud d'outil + +La fonction principale du nœud d'outil consiste à ** exécuter des outils externes ** en fonction des instructions reçues d'un nœud LLM et de ** Fournir une flexibilité pour l'intervention de l'humanité dans la boucle (HITL) ** dans le processus d'exécution de l'outil. + +#### Voici une explication étape par étape de la façon dont cela fonctionne + +1. ** Réception des appels d'outil: ** Le nœud d'outil reçoit les entrées à partir d'un nœud LLM. Si la sortie de LLM contient le`tool_calls`Propriété, le nœud d'outil procédera à l'exécution de l'outil. +2. ** Exécution: ** Le nœud d'outil passe directement les LLM`tool_calls`(qui inclut le nom de l'outil et tous les paramètres requis) à l'outil externe spécifié. Sinon, le nœud d'outil n'exécute aucun outil dans cette exécution particulière de workflow. Il ne traite ni n'interpréte la sortie de LLM de quelque manière que ce soit. +3. ** Human-in-the-Loop (HITL): ** Le nœud d'outil permet HITL facultatif, permettant l'examen humain et l'approbation ou le rejet de l'exécution de l'outil avant qu'il ne se produise. +4. ** Passage de sortie: ** Après l'exécution de l'outil (soit automatique ou après l'approbation de HITL), le nœud d'outil reçoit la sortie de l'outil et le passe au nœud suivant dans le workflow. Si la sortie du nœud d'outil n'est pas connectée à un nœud ultérieur, la sortie de l'outil est renvoyée au nœud LLM d'origine pour un traitement ultérieur. + +### Entrées + + requis Description llm nœud oui Propriété TOLL_CALLS . S'il est présent, le nœud d'outil les utilisera pour exécuter l'outil spécifié. Outils externes Non fournit le nœud d'outil avec accès à une suite d'outils externaux , l'activant pour effectuer des actions et récupérer des informations. + +### Configuration du nœud + + requis Description Nom d'outil Oui lisibilité. nécessitent l'approbation (HITL) non active la fonction humaine dans la boucle (HITL) . 
Si défini sur ' true ,' le nœud d'outil demandera l'approbation humaine avant d'exécuter un outil. Ceci est particulièrement utile pour les opérations sensibles ou lorsque la surveillance humaine est souhaitée. Par défaut est ' false ,' permettant au nœud d'outil d'exécuter des outils de manière autonome. + +### Sorties + +Le nœud d'outil peut se connecter aux nœuds suivants comme sorties: + +* ** Node d'agent: ** passe la sortie du nœud d'outil (le résultat de l'outil exécuté) à un nœud d'agent. Le nœud d'agent peut ensuite utiliser ces informations pour décider des actions, exécuter d'autres outils ou guider le flux de conversation. +* ** Node LLM: ** passe la sortie à un nœud LLM ultérieur. Cela permet l'intégration des résultats de l'outil dans le traitement de la LLM, permettant une analyse ou un raffinement plus approfondi du flux de conversation en fonction de la sortie de l'outil. +* ** Node d'agent de condition: ** Dirige le flux vers un nœud d'outil de condition. Ce nœud évalue la sortie du nœud d'outil et ses conditions prédéfinies pour déterminer l'étape suivante appropriée dans le flux de travail. +* ** Node de condition: ** Similaire au nœud d'agent de condition, le nœud de condition utilise des conditions prédéfinies pour évaluer la sortie du nœud d'outil, dirigeant le flux le long de différentes branches en fonction du résultat. +* ** Node de fin: ** conclut le flux de conversation. +* ** Node de boucle: ** Redirige le flux vers un nœud précédent, permettant des processus itératifs ou cycliques dans le flux de travail. Cela pourrait être utilisé pour les tâches qui nécessitent plusieurs exécutions d'outils ou impliquent de raffiner la conversation en fonction des résultats de l'outil. + +### Paramètres supplémentaires + + requis Description Approbation invite no une invite personnalisable présentée à la revue humaine lorsque la trace de trait est active. 
Cette invite fournit un contexte sur l'exécution de l'outil, y compris le nom et le but de l'outil. La variable {outils} dans l'invite sera remplacée dynamiquement par la liste réelle des outils suggérés par le nœud LLM, garantissant que le réviseur humain dispose de toutes les informations nécessaires pour prendre une décision éclairée. Approver le texte du bouton non Exécution dans l'interface HITL. Cela permet d'adapter le langage au contexte spécifique et d'assurer la clarté du réviseur humain. Rejeter le texte du bouton Non Personnalise le texte affiché sur le bouton pour rejeter l'exécution de l'outil dans l'interface hitl. Comme le texte du bouton Approuver, cette personnalisation améliore la clarté et fournit une action claire à prendre pour le réviseur humain s'il jugera l'exécution de l'outil inutile ou potentiellement nocif. État de mise à jour Non fournit un mécanisme pour modifier l'objet d'état personnalisé dans le cadre du travail de travail . Ceci est utile pour stocker les informations recueillies par le nœud d'outil (après l'exécution de l'outil) ou influencer le comportement des nœuds suivants. + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Placement stratégique HITL ** + +Considérez quels outils nécessitent une surveillance humaine (HITL) et activer l'option "Require approbation" en conséquence. + +** Invite d'approbation informative ** + +Lorsque vous utilisez HITL, concevez des invites claires et informatives pour les examinateurs humains. Fournir un contexte suffisant de la conversation et résumer l'action prévue de l'outil. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Formats de sortie de l'outil non géré ** + +* ** Problème: ** Le nœud d'outil sortira des données dans un format qui n'est pas attendu ou géré par les nœuds suivants dans le workflow, conduisant à des erreurs ou à un traitement incorrect. 
+* ** Exemple: ** Un nœud d'outil récupère les données à partir d'une API au format JSON, mais le nœud LLM suivant s'attend à une entrée de texte, provoquant une erreur d'analyse. +* ** Solution: ** Assurez-vous que le format de sortie de l'outil externe est compatible avec les exigences d'entrée des nœuds connectés à la sortie du nœud d'outil. +{% endtab%} +{% endtabs%} + +*** + +## 7. Node de condition + +Le nœud de condition agit comme un ** point de prise de décision dans les flux de travail d'agent séquentiel **, évaluant un ensemble de conditions prédéfinies pour déterminer le prochain chemin du flux. + +
    + +### Comprendre le nœud de condition + +Le nœud de condition est essentiel pour construire des workflows qui s'adaptent à différentes situations et entrées utilisateur. Il examine l'état actuel de la conversation, qui comprend tous les messages échangés et toutes les variables d'état personnalisées précédemment définies. Ensuite, en fonction de l'évaluation des conditions spécifiées dans la configuration du nœud, le nœud de condition dirige le débit vers l'une de ses sorties. + +Par exemple, une fois qu'un nœud agent ou LLM a fourni une réponse, un nœud de condition pourrait vérifier si la réponse contient un mot-clé spécifique ou si une certaine condition est remplie à l'état personnalisé. Si c'est le cas, le flux peut être dirigé vers un nœud d'agent pour une action supplémentaire. Sinon, cela pourrait conduire à un chemin différent, peut-être mettre fin à la conversation ou inviter l'utilisateur avec des questions supplémentaires. + +Cela nous permet de ** créer des branches dans notre flux de travail **, où le chemin emprunté dépend des données qui circulent dans le système. + +#### Voici une explication étape par étape de la façon dont cela fonctionne + +1. Le nœud de condition reçoit l'entrée de tout nœud précédent: nœud de démarrage, nœud d'agent, nœud LLM ou nœud d'outil. +2. Il a accès à l'historique complet de la conversation et à l'état personnalisé (le cas échéant), ce qui lui donne beaucoup de contexte avec lequel travailler. +3. Nous définissons une condition que le nœud évaluera. Cela pourrait être de vérifier les mots clés, la comparaison des valeurs dans l'état ou toute autre logique que nous pourrions implémenter via JavaScript. +4. En fonction de la question de savoir si la condition évalue à ** true ** ou ** false **, le nœud de condition envoie le débit dans l'un de ses chemins de sortie prédéfinis. Cela crée une "fourche sur la route" ou une branche pour notre flux de travail. 
+ +### Comment mettre en place des conditions + +Le nœud de condition nous permet de définir la logique de branchement dynamique dans notre flux de travail en choisissant soit une interface basée sur une table ** ** ou un éditeur de code JavaScript ** ** pour définir les conditions qui contrôleront le flux de conversation. + +
    + + + + Conditions utilisant le code + +Le nœud ** condition utilise JavaScript ** pour évaluer les conditions spécifiques dans le flux de conversation. + +Nous pouvons configurer des conditions basées sur des mots clés, des changements d'état ou d'autres facteurs pour guider dynamiquement le flux de travail vers différentes branches en fonction du contexte de la conversation. Voici quelques exemples: + +** Condition du mot-clé ** + +Cela vérifie si un mot ou une phrase spécifique existe dans l'histoire de la conversation. + +* ** Exemple: ** Nous voulons vérifier si l'utilisateur a dit "oui" dans son dernier message. + +{% code overflow = "wrap"%} +```javascript +const lastMessage = $flow.state.messages[$flow.state.messages.length - 1].content; +return lastMessage.includes("yes") ? "Output 1" : "Output 2"; +``` +{% Endcode%} + +1. Ce code obtient le dernier message de State.Messages et vérifie s'il contient "oui". +2. Si "oui" est trouvé, le flux va à "Output 1"; Sinon, il va à "Output 2". + +** Condition de changement d'état ** + +Cela vérifie si une valeur spécifique à l'état personnalisé est passée à une valeur souhaitée. + +* ** Exemple: ** Nous suivons une variable Orderstatus notre état personnalisé, et nous voulons vérifier s'il est devenu "confirmé". + +{% code overflow = "wrap"%} +```javascript +return $flow.state.orderStatus === "confirmed" ? "Output 1" : "Output 2"; +``` +{% Endcode%} + +1. Ce code compare directement la valeur Orderstatus dans notre état personnalisé à "confirmé". +2. S'il correspond, le flux va à "Output 1"; Sinon, il va à "Output 2". + + + + + + Conditions utilisant le tableau + +Le nœud de condition nous permet de définir des conditions à l'aide d'une ** Interface de table conviviale **, ce qui facilite la création de workflows dynamiques sans écrire de code JavaScript. 
+ +Vous pouvez configurer des conditions en fonction des mots clés, des changements d'état ou d'autres facteurs pour guider le flux de conversation le long de différentes branches. Voici quelques exemples: + +** Condition du mot-clé ** + +Cela vérifie si un mot ou une phrase spécifique existe dans l'histoire de la conversation. + +* ** Exemple: ** Nous voulons vérifier si l'utilisateur a dit "oui" dans son dernier message. +* **Installation** + + + + 1. Cette entrée de table vérifie si le contenu (.content) du dernier message (\ [- 1])`state.messages`est égal à "oui". + 2. Si la condition est remplie, le débit va à "Output 1". Sinon, le workflow est dirigé vers une sortie "end" par défaut. + +** Condition de changement d'état ** + +Cela vérifie si une valeur spécifique dans notre état personnalisé est passée à une valeur souhaitée. + +* ** Exemple: ** Nous suivons une variable Orderstatus dans notre état personnalisé, et nous voulons vérifier s'il est devenu "confirmé". +* **Installation** + + + + 1. Cette entrée de table vérifie si la valeur d'Orderstatus à l'état personnalisé est égale à "confirmée". + 2. Si la condition est remplie, le débit va à "Output 1". Sinon, le workflow est dirigé vers une sortie "end" par défaut. + + + +### Définition des conditions à l'aide de l'interface de table + +Cette approche visuelle vous permet de configurer facilement des règles qui déterminent le chemin de votre flux de conversation, en fonction de facteurs tels que l'entrée de l'utilisateur, de l'état actuel de la conversation ou des résultats des actions prises par d'autres nœuds. + + + + Tableau basé sur la table: Node de condition + +* ** Mise à jour le 09/08/2024 ** + + + + + +### Entrées + + + +{% hint style = "info"%} +Le nœud de condition ** nécessite au moins une connexion à partir des nœuds suivants **: nœud de démarrage, nœud d'agent, nœud LLM ou nœud d'outil. 
+{% EndHint%} + +### Sorties + +Le nœud de condition ** détermine dynamiquement son chemin de sortie en fonction des conditions prédéfinies **, en utilisant soit l'interface basée sur la table ou JavaScript. Cela offre une flexibilité dans la direction du flux de travail en fonction des évaluations des conditions. + +#### Logique d'évaluation des conditions + +* ** Conditions basées sur la table: ** Les conditions du tableau sont évaluées séquentiellement, de haut en bas. La première condition qui évalue à True déclenche sa sortie correspondante. Si aucune des conditions prédéfinies n'est remplie, le workflow est dirigé vers la sortie "end" par défaut. +* ** Conditions basées sur le code: ** Lorsque vous utilisez JavaScript, nous devons explicitement renvoyer le nom du chemin de sortie souhaité, y compris un nom pour la sortie "end" par défaut. +* ** Chemin de sortie unique: ** Un seul chemin de sortie est activé à la fois. Même si plusieurs conditions pouvaient être vraies, seule la première condition de correspondance détermine l'écoulement. + +#### Connexion des sorties + +Chaque sortie prédéfinie, y compris la sortie "end" par défaut, peut être connectée à l'un des nœuds suivants: + +* ** Node d'agent: ** Pour continuer la conversation avec un agent, en prenant potentiellement des mesures en fonction du résultat de la condition. +* ** Node LLM: ** Pour traiter l'historique actuel de l'état et de la conversation avec un LLM, générant des réponses ou prenant d'autres décisions. +* ** Node de fin: ** Pour terminer le flux de conversation. Si une sortie, y compris la sortie "end" par défaut, est connectée à un nœud de fin, le nœud de condition sortira la dernière réponse du nœud précédent et terminera le flux de travail. +* ** Node de boucle: ** Pour rediriger le flux vers un nœud séquentiel précédent, permettant des processus itératifs en fonction du résultat de la condition. 
+ +### Configuration du nœud + + + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Condition claire nommer ** + +Utilisez des noms descriptifs pour vos conditions (par exemple, "Si l'utilisateur est inférieur à 18 ans, alors agent de conseiller politique", "Si la commande est confirmée, alors terminez le nœud") pour rendre votre flux de travail plus facile à comprendre et à déboguer. + +** Prioriser les conditions simples ** + +Commencez par des conditions simples et ajoutez progressivement la complexité au besoin. Cela rend votre flux de travail plus gérable et réduit le risque d'erreurs. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Conception de la logique et du workflow Conception ** ** + +* ** Problème: ** Les conditions que vous définissez dans le nœud de condition ne reflètent pas avec précision la logique prévue de votre flux de travail, conduisant à une ramification inattendue ou à des chemins d'exécution incorrects. +* ** Exemple: ** Vous configurez une condition pour vérifier si l'âge de l'utilisateur est supérieur à 18 ans, mais le chemin de sortie de cette condition conduit à une section conçue pour les utilisateurs de moins de 18 ans. +* ** Solution: ** Passez en revue vos conditions et assurez-vous que les chemins de sortie associés à chaque condition correspondent à la logique du flux de travail prévu. Utilisez des noms clairs et descriptifs pour vos sorties pour éviter la confusion. + +** Gestion insuffisante de l'État ** + +* ** Problème: ** Le nœud de condition repose sur une variable d'état personnalisée qui n'est pas mise à jour correctement, conduisant à des évaluations de conditions inexactes et à une branche incorrecte. +* ** Exemple: ** Vous suivez une variable "UserLocation" dans l'état personnalisé, mais la variable n'est pas mise à jour lorsque l'utilisateur fournit son emplacement. Le nœud de condition évalue la condition en fonction de la valeur obsolète, conduisant à un chemin incorrect. 
+* ** Solution: ** Assurez-vous que toutes les variables d'état personnalisées utilisées dans vos conditions sont mises à jour correctement tout au long du workflow. +{% endtab%} +{% endtabs%} + +*** + +## 8. Nœud d'agent de condition + +Le nœud d'agent de condition fournit ** le routage dynamique et intelligent dans les flux d'agent séquentiels **. Il combine les capacités du nœud ** llm ** (sortie structurée LLM et JSON) et le nœud ** de condition ** (conditions définies par l'utilisateur), ce qui nous permet de tirer parti du raisonnement basé sur l'agent et de la logique conditionnelle dans un seul nœud. + +
    + +### Fonctionnalités clés + +* ** Routage basé sur des agents unifiés: ** combine le raisonnement d'agent, la sortie structurée et la logique conditionnelle dans un seul nœud, simplifiant la conception du flux de travail. +* ** Conscience contextuelle: ** L'agent examine tout l'historique de conversation et tout état personnalisé lors de l'évaluation des conditions. +* ** Flexibilité: ** fournit à la fois des options basées sur la table et basées sur le code pour définir des conditions, lors de la réalisation de différentes préférences et niveaux de compétences des utilisateurs. + +### Configuration du nœud d'agent de condition + +Le nœud d'agent de condition agit comme un agent spécialisé qui peut à la fois ** traiter les informations et prendre des décisions de routage **. Voici comment le configurer: + +1. ** Définissez le personnage de l'agent ** + * Dans le champ "Invite System", fournissez une description claire et concise du rôle de l'agent et de la tâche dont il a besoin pour effectuer pour le routage conditionnel. Cette invite guidera la compréhension de l'agent de la conversation et de son processus décisionnel. +2. ** Structure la sortie de l'agent (facultatif) ** + * Si vous souhaitez que l'agent produise une sortie structurée, utilisez la fonctionnalité "Sortie structurée JSON". Définissez le schéma souhaité pour la sortie, en spécifiant les clés, les types de données et toutes les valeurs d'énumération. Cette sortie structurée sera utilisée par l'agent lors de l'évaluation des conditions. +3. ** Définir les conditions ** + * Choisissez l'interface basée sur la table ou l'éditeur de code JavaScript pour définir les conditions qui détermineront le comportement de routage. + * ** Interface basée sur la table: ** Ajouter des lignes au tableau, en spécifiant la variable à vérifier, l'opération de comparaison, la valeur à comparer et le nom de sortie à suivre si la condition est remplie. 
+ * ** Code JavaScript: ** Écrivez des extraits JavaScript personnalisés pour évaluer les conditions. Utiliser le`return`Instruction pour spécifier le nom du chemin de sortie à suivre en fonction du résultat de la condition. +4. ** Connectez les sorties ** + * Connectez chaque sortie prédéfinie, y compris la sortie "end" par défaut, au nœud ultérieur approprié dans le flux de travail. Il peut s'agir d'un nœud d'agent, d'un nœud LLM, d'un nœud de boucle ou d'un nœud final. + +### Comment mettre en place des conditions + +Le nœud d'agent de condition nous permet de définir la logique de branchement dynamique dans notre flux de travail en choisissant soit une interface basée sur la table ** ** ou un éditeur de code JavaScript ** ** pour définir les conditions qui contrôleront le flux de conversation. + +
    + + + + Conditions utilisant le code + +Le nœud d'agent de condition, comme le nœud de condition **, utilise le code JavaScript pour évaluer les conditions spécifiques ** dans le flux de conversation. + +Cependant, le nœud d'agent de condition peut évaluer les conditions en fonction d'une gamme plus large de facteurs, notamment des mots clés, des changements d'état et le contenu de sa propre sortie (soit en tant que texte de forme libre, soit des données JSON structurées). Cela permet des décisions de routage plus nuancées et consacrées au contexte. Voici quelques exemples: + +** Condition du mot-clé ** + +Cela vérifie si un mot ou une phrase spécifique existe dans l'histoire de la conversation. + +* ** Exemple: ** Nous voulons vérifier si l'utilisateur a dit "oui" dans son dernier message. + +{% code overflow = "wrap"%} +```javascript +const lastMessage = $flow.state.messages[$flow.state.messages.length - 1].content; +return lastMessage.includes("yes") ? "Output 1" : "Output 2"; +``` +{% Endcode%} + +1. Ce code obtient le dernier message de State.Messages et vérifie s'il contient "oui". +2. Si "oui" est trouvé, le flux va à "Output 1"; Sinon, il va à "Output 2". + +** Condition de changement d'état ** + +Cela vérifie si une valeur spécifique à l'état personnalisé est passée à une valeur souhaitée. + +* ** Exemple: ** Nous suivons une variable Orderstatus notre état personnalisé, et nous voulons vérifier s'il est devenu "confirmé". + +{% code overflow = "wrap"%} +```javascript +return $flow.state.orderStatus === "confirmed" ? "Output 1" : "Output 2"; +``` +{% Endcode%} + +1. Ce code compare directement la valeur Orderstatus dans notre état personnalisé à "confirmé". +2. S'il correspond, le flux va à "Output 1"; Sinon, il va à "Output 2". + + + + + + Conditions utilisant le tableau + +Le nœud d'agent de condition fournit également une interface de table ** conviviale pour la définition des conditions **, similaire au nœud de condition. 
Vous pouvez configurer des conditions en fonction des mots clés, des modifications d'état ou de la propre sortie de l'agent, vous permettant de créer des workflows dynamiques sans écrire de code JavaScript. + +Cette approche basée sur une table simplifie la gestion des conditions et facilite la visualisation de la logique de ramification. Voici quelques exemples: + +** Condition du mot-clé ** + +Cela vérifie si un mot ou une phrase spécifique existe dans l'histoire de la conversation. + +* ** Exemple: ** Nous voulons vérifier si l'utilisateur a dit "oui" dans son dernier message. +* **Installation** + +
    variable opération Nom
    $flow.state.messages[-1].content est oui Sortie 1
    variable opération Nom
    $flow.state.orderStatus est confirmé Sortie 1
    Description Options / Syntax
    Variable - la variable ou l'élément de données à évaluer dans la condition. - $flow.state.messages.length (nombre total de messages)
    - $flow.state.messages[0].content (contenu du premier message)
    - $flow.state.messages[-1].content (contenu du dernier message)
    - $vars.<nom-de-variable> (variable globale)
    Opération - La comparaison ou l'opération logique à effectuer sur la variable. - contient
    - ne contient pas
    - est
    - n'est pas
    - est vide
    - n'est pas vide
    - supérieur à
    - inférieur à
    - égal à
    - pas égal à
    - supérieur ou égal à
    - inférieur ou égal à
    Valeur - la valeur à laquelle comparer la variable. Exemples : "yes", 10, "Hello"
    Nom de sortie - le nom du chemin de sortie à suivre si la condition est évaluée à true. - Nom défini par l'utilisateur (par ex. "Agent1", "End", "Loop")
    requis Description
    Démarrer le nœud oui reçoit l'état de l'état du nœud de début. Cela permet au nœud de condition d'évaluer les conditions en fonction du contexte initial de la conversation , y compris tout état personnalisé.
    Node d'agent oui reçoit la sortie du nœud de l'agent. Cela permet au nœud de condition de prendre des décisions en fonction des actions de l'agent et de l'historique de conversation, y compris tout état personnalisé.
    llm nœud Oui reçoit la sortie du nœud LLM. Cela permet au nœud de condition d'évaluer les conditions en fonction de la réponse de la LLM et de l'historique de conversation, y compris tout état personnalisé.
    Le nœud d'outil oui reçoit la sortie du nœud de l'outil. Cela permet au nœud de condition de prendre des décisions en fonction des résultats de l'exécution de l'outil et de l'historique de conversation, y compris tout état personnalisé.
    requis Description
    Nom du nœud de condition non un nom facultatif et lisible par l'humain pour la condition. Ceci est utile pour comprendre le flux de travail en un coup d'œil.
    Condition Oui C'est là que nous définissons la logique qui sera évaluée pour déterminer les chemins de sortie.
    + + 1. Cette entrée de table vérifie si le contenu (.content) du dernier message (\ [- 1])`state.messages`est égal à "oui". + 2. Si la condition est remplie, le débit va à "Output 1". Sinon, le workflow est dirigé vers une sortie "end" par défaut. + +** Condition de changement d'état ** + +Cela vérifie si une valeur spécifique dans notre état personnalisé est passée à une valeur souhaitée. + +* ** Exemple: ** Nous suivons une variable Orderstatus dans notre état personnalisé, et nous voulons vérifier s'il est devenu "confirmé". +* **Installation** + + + + 1. Cette entrée de table vérifie si la valeur d'Orderstatus à l'état personnalisé est égale à "confirmée". + 2. Si la condition est remplie, le débit va à "Output 1". Sinon, le workflow est dirigé vers une sortie "end" par défaut. + + + +### Définition des conditions à l'aide de l'interface de table + +Cette approche visuelle vous permet de configurer facilement des règles qui déterminent le chemin de votre flux de conversation, en fonction de facteurs tels que l'entrée de l'utilisateur, de l'état actuel de la conversation ou des résultats des actions prises par d'autres nœuds. + + + + Tableau basé sur la table: Condition d'agent Node + +* ** Mise à jour le 09/08/2024 ** + + + + + +### Entrées + + + +{% hint style = "info"%} +Le nœud d'agent de condition ** nécessite au moins une connexion à partir des nœuds suivants **: nœud de démarrage, nœud d'agent, nœud LLM ou nœud d'outil. +{% EndHint%} + +### Configuration du nœud + + + +### Sorties + +Le nœud d'agent de condition, comme le nœud de condition **, détermine dynamiquement son chemin de sortie en fonction des conditions définies **, en utilisant soit l'interface basée sur la table ou JavaScript. Cela offre une flexibilité dans la direction du flux de travail en fonction des évaluations des conditions. 
+ +#### Logique d'évaluation des conditions + +* ** Conditions basées sur la table: ** Les conditions du tableau sont évaluées séquentiellement, de haut en bas. La première condition qui évalue à True déclenche sa sortie correspondante. Si aucune des conditions prédéfinies n'est remplie, le workflow est dirigé vers la sortie "end" par défaut. +* ** Conditions basées sur le code: ** Lorsque vous utilisez JavaScript, nous devons explicitement renvoyer le nom du chemin de sortie souhaité, y compris un nom pour la sortie "end" par défaut. +* ** Chemin de sortie unique: ** Un seul chemin de sortie est activé à la fois. Même si plusieurs conditions pouvaient être vraies, seule la première condition de correspondance détermine l'écoulement. + +#### Connexion des sorties + +Chaque sortie prédéfinie, y compris la sortie "end" par défaut, peut être connectée à l'un des nœuds suivants: + +* ** Node d'agent: ** Pour continuer la conversation avec un agent, en prenant potentiellement des mesures en fonction du résultat de la condition. +* ** Node LLM: ** Pour traiter l'historique actuel de l'état et de la conversation avec un LLM, générant des réponses ou prenant d'autres décisions. +* ** Node de fin: ** Pour terminer le flux de conversation. Si la sortie "end" par défaut est connectée à un nœud de fin, le nœud de condition sortira la dernière réponse du nœud précédent et terminera la conversation. +* ** Node de boucle: ** Pour rediriger le flux vers un nœud séquentiel précédent, permettant des processus itératifs en fonction du résultat de la condition. + +#### Différences clés par rapport au nœud de condition + +* Le nœud d'agent de condition ** intègre le raisonnement d'un agent ** et la sortie structurée dans le processus d'évaluation de la condition. +* Il fournit une approche plus intégrée du routage des conditions basées sur les agents. 
+ +### Paramètres supplémentaires + + + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Créez une invite de système claire et ciblée ** + +Fournissez une personnalité bien définie et des instructions claires à l'agent dans l'invite du système. Cela guidera son raisonnement et l'aidera à générer une sortie pertinente pour la logique conditionnelle. + +** Structure Sortie pour des conditions fiables ** + +Utilisez la fonction de sortie structurée JSON pour définir un schéma pour la sortie de l'agent de condition. Cela garantira que la sortie est cohérente et facilement sanitaire, ce qui le rend plus fiable pour une utilisation dans les évaluations conditionnelles. +{% endtab%} + +{% tab title = "Pièges potentiels"%} +** Route peu fiable en raison de la sortie non structurée ** + +* ** Problème: ** Le nœud d'agent de condition n'est pas configuré pour sortir des données JSON structurées, conduisant à des formats de sortie imprévisibles qui peuvent rendre difficile la définition de conditions fiables. +* ** Exemple: ** Le nœud de l'agent de condition est invité à déterminer le sentiment de l'utilisateur (positif, négatif, neutre) mais publie son évaluation en tant que chaîne de texte de forme libre. La variabilité de la langue de l'agent rend difficile de créer des conditions précises dans le tableau ou le code conditionnel. +* ** Solution: ** Utilisez la fonction de sortie structurée JSON pour définir un schéma pour la sortie de l'agent. Par exemple, spécifiez une clé "Sentiment" avec une énumération de "positif", "négatif" et "neutre". Cela garantira que la sortie de l'agent est systématiquement structurée, ce qui facilite la création de conditions fiables. +{% endtab%} +{% endtabs%} + +*** + +## 9. Nœud de boucle + +Le nœud de boucle nous permet de créer des boucles dans notre flux conversationnel, ** redirigeant la conversation vers un point spécifique **. 
Ceci est utile pour les scénarios où nous devons répéter une certaine séquence d'actions ou de questions en fonction de l'entrée utilisateur ou de conditions spécifiques. + +
    + +### Comprendre le nœud de boucle + +Le nœud de boucle agit comme un connecteur, redirigeant le flux vers un point spécifique dans le graphique, nous permettant de créer des boucles dans notre flux conversationnel. ** Il passe l'état actuel, qui comprend la sortie du nœud précédant le nœud de boucle à notre nœud cible. ** Ce transfert de données permet à notre nœud cible de traiter les informations à partir de l'itération précédente de la boucle et ajuster son comportement en conséquence. + +Par exemple, disons que nous construisons un chatbot qui aide les utilisateurs à réserver des vols. Nous pourrions utiliser une boucle pour affiner itérativement les critères de recherche en fonction des commentaires des utilisateurs. + +#### Voici comment le nœud de boucle pourrait être utilisé + +1. ** Node LLM (recherche initiale): ** Le nœud LLM reçoit la demande de vol initiale de l'utilisateur (par exemple, "Trouvez des vols de Madrid à New York en juillet"). Il interroge une API de recherche de vols et renvoie une liste de vols possibles. +2. ** Node d'agent (options actuelles): ** Le nœud d'agent présente les options de vol à l'utilisateur et demande s'ils souhaitent affiner leur recherche (par exemple, "Souhaitez-vous filtrer par prix, compagnie aérienne ou heure de départ?"). +3. ** Node d'agent de condition: ** Le nœud d'agent de condition vérifie la réponse de l'utilisateur et a deux sorties: + * ** Si l'utilisateur souhaite affiner: ** Le flux va au nœud LLM "Recherche affiner". + * ** Si l'utilisateur est satisfait des résultats: ** Le flux passe au processus de réservation. +4. ** Node LLM (Recherche de recherche): ** Ce nœud LLM rassemble les critères de raffinement de l'utilisateur (par exemple, "Montrez-moi uniquement des vols en moins de 500 $") et met à jour l'état avec les nouveaux paramètres de recherche. +5. ** Node de boucle: ** Le nœud de boucle redirige le flux vers le nœud LLM initial ("Recherche initiale"). 
Il transmet l'état mis à jour, qui comprend désormais les critères de recherche affinés. +6. **Itération :** Le nœud LLM initial effectue une nouvelle recherche en utilisant les critères affinés, et le processus se répète à partir de l'étape 2. + +**Dans cet exemple, le nœud de boucle permet un processus d'affinement itératif de la recherche.** Le système peut continuer à boucler et à affiner les résultats de recherche jusqu'à ce que l'utilisateur soit satisfait des options présentées. + +### Entrées + +
    + +{% hint style = "info"%} +Le nœud de boucle ** nécessite au moins une connexion à partir des nœuds suivants **: nœud d'agent, nœud LLM, nœud d'outil, nœud de condition ou nœud d'agent de condition. +{% EndHint%} + +### Configuration du nœud + + + +### Sorties + +Le nœud de boucle ** n'a pas de connexions de sortie directe **. Il redirige le flux vers le nœud séquentiel spécifique dans le graphique. + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Objectif de boucle claire ** + +Définissez un objectif clair pour chaque boucle de votre flux de travail. Si possible, documentez avec une note collante ce que vous essayez de réaliser avec la boucle. +{% endtab%} + +{% tab title = "Pièges de potential"%} +** Structure de flux de travail confuse ** + +* ** Problème: ** Des boucles excessives ou mal conçues rendent le flux de travail difficile à comprendre et à entretenir. +* ** Exemple: ** Vous utilisez plusieurs boucles imbriquées sans but ou étiquettes claires, ce qui rend difficile de suivre le flux de la conversation. +* ** Solution: ** Utilisez des boucles avec parcimonie et uniquement lorsque cela est nécessaire. Documentez clairement vos nœuds de boucle et les nœuds auxquels ils se connectent. + +** Boucles infinies en raison de conditions de sortie manquantes ou incorrectes ** + +* ** Problème: ** La boucle ne se termine jamais car la condition qui devrait déclencher la sortie de la boucle est manquante ou défini à tort. +* ** Exemple: ** Un nœud de boucle est utilisé pour collecter itérativement les informations utilisateur. Cependant, le flux de travail n'a pas de nœud d'agent conditionnel pour vérifier si toutes les informations requises ont été collectées. En conséquence, la boucle se poursuit indéfiniment, demandant à plusieurs reprises à l'utilisateur les mêmes informations. +* ** Solution: ** Définissez toujours des conditions de sortie claires et précises pour les boucles. 
Utilisez des nœuds de condition pour vérifier les variables d'état, l'entrée de l'utilisateur ou d'autres facteurs qui indiquent quand la boucle doit se terminer. +{% endtab %} +{% endtabs %} + +*** + +## 10. Nœud de fin + +Le nœud de fin marque le point de terminaison définitif **de la conversation** dans un flux de travail d'agents séquentiels. Il signifie qu'aucun traitement, action ou interaction supplémentaire n'est requis. + +
    + +### Comprendre le nœud final + +Le nœud final sert de signal dans l'architecture d'agent séquentiel de Flowise, ** indiquant que la conversation a atteint sa conclusion prévue **. En atteignant le nœud final, le système "comprend" que l'objectif conversationnel a été atteint et qu'aucune autre actions ou interactions n'est requise dans le flux. + +### Entrées + +
    Fin de l'exécution du nœud de condition. + +### Choisir le bon nœud + +* ** Node de condition: ** Utilisez le nœud de condition lorsque votre logique de routage implique des décisions simples en fonction de conditions facilement définissables. Par exemple, il est parfait pour vérifier les mots clés spécifiques, la comparaison des valeurs dans l'état ou l'évaluation d'autres expressions logiques simples. +* ** Node d'agent de condition: ** Cependant, lorsque votre routage exige une compréhension plus profonde des nuances de la conversation, le nœud d'agent de condition est le meilleur choix. Ce nœud agit comme votre assistant de routage intelligent, tirant parti d'un LLM pour analyser la conversation, faire des jugements en fonction du contexte et fournir une sortie structurée qui pilote un routage plus sophistiqué et dynamique. + +*** + +## Node d'agent vs nœud LLM + +Il est important de comprendre que le nœud ** llm et le nœud d'agent peuvent être considérés comme des entités agentiques au sein de notre système **, car ils exploitent tous les deux les capacités d'un modèle de langue grand (LLM) ou d'un modèle de chat. + +Cependant, bien que les deux nœuds puissent traiter le langage et interagir avec les outils, ils sont conçus à différentes fins dans un flux de travail. + + + + Node d'agent + +**Se concentrer** + +L'objectif principal du nœud d'agent pour simuler les actions et la prise de décision d'un agent humain dans un contexte conversationnel. + +Il agit comme un coordinateur de haut niveau au sein du flux de travail, réunissant la compréhension du langage, l'exécution des outils et la prise de décision pour créer une expérience conversationnelle plus humaine. + +** Forces ** + +* Gère efficacement l'exécution de plusieurs outils et intègre leurs résultats. +* Offre une prise en charge intégrée pour l'homme en boucle (HITL), permettant l'examen humain et l'approbation des opérations sensibles. 
+ +** le mieux adapté pour ** + +* Les flux de travail où l'agent doit guider l'utilisateur, collecter des informations, faire des choix et gérer le flux de conversation global. +* Scénarios nécessitant une intégration avec plusieurs outils externes. +* Des tâches impliquant des données sensibles ou des actions où la surveillance humaine est bénéfique, comme l'approbation de la transaction financière + + + + + + llm nœud + +**Se concentrer** + +Semblable au nœud d'agent, mais il offre plus de flexibilité lors de l'utilisation d'outils et de boucle humaine (HITL), tous deux via le nœud d'outil. + +** Forces ** + +* Permet à la définition des schémas JSON de structurer la sortie de LLM, ce qui facilite l'extraction d'informations spécifiques. +* Offre une flexibilité dans l'intégration des outils, permettant des séquences plus complexes de LLM et d'appels d'outils, et fournissant un contrôle à grain fin sur la fonction HITL. + +** le mieux adapté pour ** + +* Scénarios où les données structurées doivent être extraites de la réponse du LLM. +* Les workflows nécessitant un mélange d'exécutions d'outils automatisées et évaluées par l'homme. Par exemple, un nœud LLM peut appeler un outil pour récupérer les informations du produit (automatisé), puis un outil différent pour traiter un paiement, qui nécessiterait l'approbation de HITL. + + + +### Résumant + + + +### Choisir le bon nœud + +* ** Choisissez le nœud d'agent: ** Utilisez le nœud d'agent lorsque vous devez créer un système conversationnel qui peut gérer l'exécution de plusieurs outils, tous partageant le même paramètre HITL (activé ou désactivé pour le nœud d'agent entier). Le nœud d'agent est également bien adapté pour gérer les conversations complexes en plusieurs étapes où un comportement cohérent de type agent est souhaité. 
+* ** Choisissez le nœud LLM: ** D'un autre côté, utilisez le nœud LLM lorsque vous devez extraire des données structurées à partir de la sortie de LLM à l'aide de la fonction de schéma JSON, une capacité non disponible dans le nœud d'agent. Le nœud LLM excelle également à l'orchestration de l'exécution de l'outil avec un contrôle à grain fin sur HITL au niveau de l'outil individuel, vous permettant de mélanger les exécutions d'outils automatisées et évaluées par l'homme en utilisant plusieurs nœuds d'outils connectés au nœud LLM. + +[^ 1]: Dans notre contexte actuel, un niveau d'abstraction inférieur fait référence à un système qui expose un plus grand degré de détail au développeur. diff --git a/fr/using-flowise/agentflowv1/sequential-agents/video-tutorials.md b/fr/using-flowise/agentflowv1/sequential-agents/video-tutorials.md new file mode 100644 index 00000000..60c2d321 --- /dev/null +++ b/fr/using-flowise/agentflowv1/sequential-agents/video-tutorials.md @@ -0,0 +1,35 @@ +--- +description: Learn Sequential Agents from the Community +--- + +# Tutoriels vidéo + +### Construisez un agent de chiffon à plusieurs étapes + +Dans cette vidéo,[Leon](https://youtube.com/@leonvanzyl)Fournit un tutoriel étape par étape sur la création d'un agent de chiffon avancé qui intègre des techniques de routage, de secours et d'auto-correction. + +{% embed url = "https://youtu.be/oejuvdyn_u8"%} + +### Master Agents Sequential: Créez des applications AI complexes avec Flowise + +Dans cette vidéo,[Leon](https://youtube.com/@leonvanzyl)Fournit une ** introduction complète à l'architecture d'agent séquentiel ** et montre comment gérer l'état personnalisé pour créer des applications plus dynamiques. + +{% embed url = "https://www.youtube.com/watch?v=6LBVGTBS0BE"%} + +### Séquentiel vs multi-agents: quelle fonction fluidise vous convient? + +Dans cette vidéo,[Leon](https://youtube.com/@leonvanzyl)Semble deux solutions différentes en flux pour créer des projets multi-agents. 
Il compare les ** différences entre les agents séquentiels et les multi-agents ** en recréant les mêmes projets en utilisant les deux techniques. + +{% embed url = "https://www.youtube.com/watch?v=3ZMBQ8_4VCS"%} + +### Construisez les applications prêtes à la production en quelques minutes: ** Agents séquentiels de Flowise et N8N + +Dans cette vidéo,[Wntrmute AI](https://www.youtube.com/@WntrmuteAI)montre comment construire rapidement une application ** prête à la production ** en moins de 30 minutes en combinant ** les agents séquentiels de Flowise ** et ** n8n **. + +{% embed url = "https://www.youtube.com/watch?v=da_0eotynmc"%} + +### Comment construire une IA auto-améliorée avec un chiffon agentique et floquer + +Dans cette vidéo,[Leon](https://youtube.com/@leonvanzyl)Vous montrera comment construire une application de chiffon auto-corrigé à l'aide des agents séquentiels de Flowiseai. Le chiffon agentique est une approche puissante pour créer des solutions d'IA qui peuvent apprendre et améliorer leurs réponses au fil du temps. + +{% embed url = "https://www.youtube.com/watch?v=SL77OJBGY6U"%} diff --git a/fr/using-flowise/agentflowv2.md b/fr/using-flowise/agentflowv2.md new file mode 100644 index 00000000..494f2500 --- /dev/null +++ b/fr/using-flowise/agentflowv2.md @@ -0,0 +1,1328 @@ +--- +description: Learn how to build multi-agents system using Agentflow V2, written by @toi500 +--- + +# Agentflow v2 + +Ce guide explore l'architecture AgentFlow V2, détaillant ses concepts principaux, ses cas d'utilisation, son état de flux et ses références de nœuds complets. + +{% hint style = "avertissement"%} +** Avis de non-responsabilité: ** Cette documentation décrit AgentFlow V2 à sa version officielle actuelle. Les fonctionnalités, les fonctionnalités et les paramètres de nœud sont soumis à un changement dans les futures mises à jour et versions de Flowise. 
Veuillez vous référer aux dernières notes de publication officielle ou à des informations sur l'application pour les détails les plus récents. +{% EndHint%} + +{% embed url = "https://youtu.be/-h4wquzrhhi?si=jkhuefiw06ao6ge"%} + +## Concept de base + +AgentFlow V2 représente une évolution architecturale significative, introduisant un nouveau paradigme en flux qui se concentre sur une orchestration explicite du flux de travail et une flexibilité accrue. Contrairement à la dépendance principale de V1 sur les cadres externes pour sa logique de graphique d'agent de base, V2 déplace l'attention de la conception de l'ensemble du flux de travail en utilisant un ensemble granulaire de nœuds autonomes spécialisés développés nativement en tant que composants coulissants principaux. + +Dans cette architecture V2, chaque nœud fonctionne comme une unité indépendante, exécutant une opération discrète en fonction de sa conception et de sa configuration spécifiques. Les connexions visuelles entre les nœuds de la canevas définissent explicitement le chemin de travail et la séquence de contrôle du workflow, les données peuvent être transmises entre les nœuds en faisant référence aux sorties de tout nœud précédemment exécuté dans le flux actuel, et l'état de flux fournit un mécanisme explicite pour gérer et partager des données tout au long du flux de travail. + +L'architecture V2 met en œuvre un système complet de la dépendance aux nœuds et de la file d'attente d'exécution qui respecte précisément ces voies définies tout en maintenant une séparation claire entre les composants, permettant aux flux de travail de devenir à la fois plus sophistiqués et plus faciles à concevoir. Cela permet aux modèles complexes comme les boucles, la ramification conditionnelle, les interactions humaines dans la boucle et d'autres à être réalisables. Cela le rend plus adaptable à divers cas d'utilisation tout en restant plus maintenable et extensible. + +
    + +## Différence entre AgentFlow et Plateforme d'automatisation + +L'une des questions les plus posées: quelle est la différence entre AgentFlow et les plates-formes d'automatisation comme N8N, Make ou Zapier? + +### 💬 ** Communication d'agent à agent ** + +La communication multimodale entre les agents est prise en charge. Un agent de superviseur peut formuler et déléguer des tâches à plusieurs agents de travailleurs, avec des sorties des agents des travailleurs retournés par la suite au superviseur. + +À chaque étape, les agents ont accès à l'historique complet de la conversation, permettant au superviseur de déterminer la tâche suivante et les agents des travailleurs pour interpréter la tâche, sélectionner les outils appropriés et exécuter les actions en conséquence. + +Cette architecture permet ** la collaboration, la délégation et la gestion des tâches partagées ** sur plusieurs agents, ces capacités ne sont généralement pas offertes par les outils d'automatisation traditionnels. + +
    + +### 🙋‍ Human dans la boucle + +L'exécution est interrompue en attendant l'entrée humaine, sans bloquer le thread en cours d'exécution. Chaque point de contrôle est enregistré, permettant au flux de travail de reprendre à partir du même point même après un redémarrage de l'application. + +L'utilisation de points de contrôle permet ** les agents de longue durée et avec état **. + +Les agents peuvent également être configurés pour ** demander l'autorisation avant d'exécuter des outils **, similaire à la façon dont Claude demande l'approbation de l'utilisateur avant d'utiliser les outils MCP. Cela permet d'empêcher l'exécution autonome d'actions sensibles sans l'approbation explicite de l'utilisateur. + +
    + +### 📖 État partagé + +L'état partagé permet l'échange de données entre les agents, particulièrement utile pour passer des données entre les branches ou les étapes non adjacentes d'un flux. Se référer à[#understanding-flow-state](agentflowv2.md#understanding-flow-state "mention") + +### ⚡ Streaming + +Prend en charge les événements de serveur (SSE) pour le streaming en temps réel de réponses LLM ou d'agent. Le streaming permet également aux mises à jour de l'abonnement à l'exécution au fur et à mesure que le flux de travail progresse. + +
    + +### 🌐 outils MCP + +Alors que les plates-formes d'automatisation traditionnelles présentent souvent de vastes bibliothèques d'intégrations prédéfinies, AgentFlow permet à MCP ([Model Context Protocol](https://github.com/modelcontextprotocol)) outils à connecter dans le cadre du flux de travail, plutôt que de fonctionner uniquement en tant qu'outils d'agent. + +Les MCP personnalisés peuvent également être créés indépendamment, sans dépendre des intégrations fournies par la plate-forme. MCP est largement considéré comme une norme de l'industrie et est généralement soutenu et maintenu par les prestataires officiels. Par exemple, le GitHub MCP est développé et maintenu par l'équipe GitHub, avec un soutien similaire fourni pour Atlassian Jira, Brave Search, et autres. + +
    + +## Référence du nœud AgentFlow V2 + +Cette section fournit une référence détaillée pour chaque nœud disponible, décrivant son objectif spécifique, les paramètres de configuration des clés, les entrées attendues, les sorties générées et son rôle dans l'architecture AgentFlow V2. + + + +*** + +### ** 1. Démarrer le nœud ** + +Le point d'entrée désigné pour lancer n'importe quelle exécution de workflow AgentFlow V2. Chaque flux doit commencer par ce nœud. + +* ** Fonctionnalité: ** Définit comment le flux de travail est déclenché et configure les conditions initiales. Il peut accepter les entrées directement à partir de l'interface de chat ou via un formulaire personnalisable présenté à l'utilisateur. Il permet également l'initialisation de`Flow State`Variables au début de l'exécution et peut gérer la façon dont la mémoire de conversation est gérée pour l'exécution. +* ** Paramètres de configuration ** + * ** Type d'entrée **: détermine comment l'exécution du flux de travail est initiée, soit par`Chat Input`de l'utilisateur ou via un soumis`Form Input`. + * ** Titre du formulaire, description du formulaire, types d'entrée de formulaire **: If`Form Input`est sélectionné, ces champs configurent l'apparence du formulaire présenté à l'utilisateur, permettant divers types de champs de saisie avec des étiquettes définies et des noms de variables. + * ** Mémoire éphémère **: si elle est activée, demande au workflow de commencer l'exécution sans considérer les messages passés du thread de conversation, en commençant efficacement par une ardoise de mémoire propre. + * ** État de flux **: définit l'ensemble complet des paires de valeurs clés initiales pour l'état d'exécution du workflow`$flow.state`. Toutes les clés d'état qui seront utilisées ou mises à jour par les nœuds suivantes doivent être déclarées et initialisées ici. 
+* ** Entrées: ** Reçoit les données initiales qui déclenchent le workflow, qui sera soit un message de chat, soit les données soumises via un formulaire. +* ** Sorties: ** Fournit une seule ancre de sortie pour se connecter au premier nœud opérationnel, passant les données d'entrée initiales et l'état de flux initialisé. + +
    + +*** + +### ** 2. Node LLM ** + +Fournit un accès direct à un modèle de grande langue (LLM) configuré pour exécuter des tâches AI, permettant au workflow d'effectuer une extraction structurée de données si nécessaire. + +* ** Fonctionnalité: ** Ce nœud envoie des demandes à un LLM basé sur des instructions (messages) et un contexte fourni. Il peut être utilisé pour la génération de texte, le résumé, la traduction, l'analyse, la réponse aux questions et la génération de sortie JSON structurée selon un schéma défini. Il a accès à la mémoire pour le thread de conversation et peut lire / écrire`Flow State`. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O d'OpenAI ou Google Gemini. + * ** Messages **: Définissez l'entrée conversationnelle pour le LLM, en la structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la réponse de l'IA. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Mémoire **: Si vous activez, détermine si le LLM doit considérer l'historique du thread de conversation actuel lors de la génération de sa réponse. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté au LLM - par exemple, s'il faut inclure tous les messages, seulement une fenêtre récente de virages ou une version résumée. + * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** Retour Response As **: Configure comment la sortie de LLM est classée - comme un`User Message`ou`Assistant Message`- qui peut influencer la façon dont il est géré par les systèmes de mémoire ou la journalisation ultérieurs. 
+ * ** Sortie structurée JSON **: Demande au LLM de formater sa sortie en fonction d'un schéma JSON spécifique - y compris des clés, des types de données et des descriptions - garantissant des données prévisibles et lisibles par la machine. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud LLM sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise des données du déclencheur initial du workflow ou des sorties des nœuds précédents, incorporant ces données dans le`Messages`ou`Input Message`champs. Il peut également récupérer des valeurs de`$flow.state`Lorsque les variables d'entrée le font référence. +* ** Sorties: ** produit la réponse de LLM, qui sera soit du texte brut, soit un objet JSON structuré. La catégorisation de cette sortie - en tant qu'utilisateur ou assistant - est déterminée par le`Return Response`paramètre. + +
    + +*** + +### ** 3. Node d'agent ** + +Représente une entité d'IA autonome capable de raisonner, de planifier et d'interagir avec des outils ou des sources de connaissances pour atteindre un objectif donné. + +* ** Fonctionnalité: ** Ce nœud utilise un LLM pour décider dynamiquement d'une séquence d'actions. En fonction de l'objectif de l'utilisateur - fourni via des messages / entrées - il peut choisir d'utiliser des outils disponibles ou des magasins de documents de requête pour recueillir des informations ou effectuer des actions. Il gère son propre cycle de raisonnement et peut utiliser la mémoire pour le fil de conversation et`Flow State`. Convient aux tâches nécessitant un raisonnement en plusieurs étapes ou interagissant dynamiquement avec des systèmes ou des outils externes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O ou Google Gemini d'OpenAI - qui conduira les processus de raisonnement et de prise de décision de l'agent. + * ** Messages **: Définissez l'entrée conversationnelle initiale, l'objectif ou le contexte pour l'agent, en le structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la compréhension de l'agent et les actions ultérieures. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Outils **: Spécifiez quels outils fluide prédéfinis l'agent est autorisé à utiliser pour atteindre ses objectifs. + * Pour chaque outil sélectionné, un ** facultatif ** nécessite un indicateur d'entrée humain ** indique si l'opération de l'outil peut elle-même s'arrêter pour demander une intervention humaine. + * ** Magasins de connaissances / documents **: Configurer l'accès aux informations dans les magasins de documents gérés par flux. + * ** Magasin de documents **: Choisissez une boutique de documents préconfigurée à partir de laquelle l'agent peut récupérer des informations. 
Ces magasins doivent être mis en place et peuplés à l'avance. + * ** Décrire les connaissances **: Fournir une description du langage naturel du contenu et du but de ce magasin de documents. Cette description guide l'agent pour comprendre quel type d'informations le magasin contient et quand il serait approprié de les interroger. + * ** Connaissances / Vector intégrés **: Configurez l'accès aux magasins de vecteurs externes et préexistants comme sources de connaissances supplémentaires pour l'agent. + * ** Vector Store **: Sélectionne la base de données vectorielle spécifique et préconfigurée que l'agent peut interroger. + * ** Modèle d'intégration **: Spécifie le modèle d'intégration associé au magasin vectoriel sélectionné, assurant la compatibilité des requêtes. + * ** Nom de la connaissance **: attribue un court nom descriptif à cette source de connaissances basée sur un vecteur, que l'agent peut utiliser pour référence. + * ** Décrire les connaissances **: Fournir une description du langage naturel du contenu et du but de ce magasin vectoriel, en guidant l'agent sur quand et comment utiliser cette source de connaissances spécifique. + * ** RETOUR DOCUMENTS SOURCES **: Si vous êtes activé, demande à l'agent d'inclure des informations sur les documents source avec les données récupérées du magasin vectoriel. + * ** Mémoire **: Si vous êtes activé, détermine si l'agent doit considérer l'historique du thread de conversation actuel lors de la prise de décisions et de la génération de réponses. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté à l'agent - par exemple, que ce soit pour inclure tous les messages, seulement une fenêtre récente ou une version résumée. 
+ * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** RETOUR RÉPONSE **: Configure comment la sortie ou le message final de l'agent est classé - en tant que message utilisateur ou message assistant - qui peut influencer la façon dont il est géré par des systèmes de mémoire ultérieurs ou la journalisation. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud d'agent sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise les données du déclencheur initial du workflow ou des sorties des nœuds précédents, souvent incorporés dans le`Messages`ou`Input Message`champs. Il accède aux outils configurés et aux sources de connaissances selon les besoins. +* ** Sorties: ** produit le résultat ou la réponse finale générée par l'agent une fois qu'il a terminé son raisonnement, sa planification et toute interaction avec des outils ou des sources de connaissances. + +
    + +*** + +### ** 4. Node d'outil ** + +Fournit un mécanisme pour exécuter directement et de manière déterministe un outil fluide spécifique et prédéfini dans la séquence de workflow. Contrairement au nœud d'agent, où le LLM choisit dynamiquement un outil basé sur le raisonnement, le nœud d'outil exécute exactement l'outil sélectionné par le concepteur de workflow pendant la configuration. + +* ** Fonctionnalité: ** Ce nœud est utilisé lorsque le workflow nécessite l'exécution d'une capacité spécifique connue à un point défini, avec des entrées facilement disponibles. Il garantit une action déterministe sans impliquer le raisonnement LLM pour la sélection des outils. +* ** Comment ça marche ** + 1. ** TRANGERS: ** Lorsque l'exécution du workflow atteint un nœud d'outil, il s'active. + 2. ** Identification de l'outil: ** Il identifie l'outil de flux spécifique sélectionné dans sa configuration. + 3. ** Résolution de l'argument d'entrée: ** Il examine la configuration des arguments d'entrée de l'outil. Pour chaque paramètre d'entrée requis de l'outil sélectionné. + 4. ** Exécution: ** Il invoque le code sous-jacent ou l'appel API associé à l'outil Flowise sélectionné, passant les arguments d'entrée résolus. + 5. ** Génération de sortie: ** Il reçoit le résultat renvoyé par l'exécution de l'outil. + 6. ** Propagation de sortie: ** Il rend ce résultat disponible via son ancre de sortie pour les nœuds suivants. +* ** Paramètres de configuration ** + * ** Sélection d'outils **: Choisissez l'outil Flowise spécifique et enregistré que ce nœud exécutera à partir d'une liste déroulante. + * ** Arguments d'entrée **: Définissez comment les données de votre flux de travail sont fournies à l'outil sélectionné. 
Cette section s'adapte dynamiquement en fonction de l'outil choisi, présentant ses paramètres d'entrée spécifiques: + * ** Nom de l'argument de la carte **: Pour chaque entrée, l'outil sélectionné nécessite (par exemple,`input`Pour une calculatrice), ce champ affichera le nom du paramètre attendu tel que défini par l'outil lui-même. + * ** Fournir une valeur d'argument **: Définissez la valeur de ce paramètre correspondant, en utilisant une variable dynamique comme`{{ previousNode.output }}`, `{{ $flow.state.someKey }}`, ou en entrant un texte statique. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de cet outil sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** reçoit les données nécessaires pour les arguments de l'outil via le`Input Arguments`mappage, valeurs d'approvisionnement à partir des sorties de nœud précédentes,`$flow.state`ou configurations statiques. +* ** Sorties: ** produit la sortie brute générée par l'outil exécuté - par exemple, une chaîne JSON à partir d'une API, un résultat de texte ou une valeur numérique. + +
    + +*** + +### ** 5. Retriever Node ** + +Effectue une récupération d'informations ciblée à partir des magasins de documents configurés. + +* ** Fonctionnalité: ** Ce nœud interroge un ou plusieurs magasins de documents spécifiés, récupérant des morceaux de document pertinents basés sur la similitude sémantique. C'est une alternative ciblée à l'utilisation d'un nœud d'agent lorsque la seule action requise est la récupération et la sélection des outils dynamiques par un LLM n'est pas nécessaire. +* ** Paramètres de configuration ** + * ** Magasins de connaissances / documents **: Spécifiez quel (s) magasin de documents préconfigurés et peuplés, ce nœud doit interroger pour trouver des informations pertinentes. + * ** Retriever Query **: Définissez la requête texte qui sera utilisée pour rechercher les magasins de documents sélectionnés. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Format de sortie **: Choisissez comment les informations récupérées doivent être présentées - soit comme simple`Text`ou comme`Text with Metadata`, qui peut inclure des détails tels que les noms de documents source ou les emplacements. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud Retriever sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** nécessite une chaîne de requête - souvent fournie comme une variable à partir d'une étape précédente ou d'une entrée utilisateur - et accède aux magasins de documents sélectionnés pour plus d'informations. +* ** sorties: ** produit les morceaux de document récupérés de la base de connaissances, formaté selon les choisis`Output Format`. + +
    + +*** + +### 6. nœud http + +Facilite la communication directe avec les services Web externes et les API via le protocole de transfert hypertexte (HTTP). + +* ** Fonctionnalité: ** Ce nœud permet au workflow d'interagir avec tout système externe accessible via HTTP. Il peut envoyer différents types de demandes (obtenir, publier, mettre, supprimer, patcher) à une URL spécifiée, permettant une intégration avec des API tierces, récupérer des données à partir de ressources Web ou déclencher des webhooks externes. Le nœud prend en charge la configuration des méthodes d'authentification, des en-têtes personnalisés, des paramètres de requête et différents types de corps de demande pour répondre aux diverses exigences d'API. +* ** Paramètres de configuration ** + * ** HTTP Idedential **: Sélectionnez éventuellement des informations d'identification préconfigurées - telles que l'authentification de base, le jeton de support ou la clé API - pour authentifier les demandes au service cible. + * ** Méthode de la demande **: Spécifiez la méthode HTTP à utiliser pour la demande - par exemple,`GET`, `POST`, `PUT`, `DELETE`, `PATCH`. + * ** URL cible **: Définissez l'URL complète du point de terminaison externe auquel la demande sera envoyée. + * ** En-têtes de demande **: Définissez tous les en-têtes HTTP nécessaires en paires de valeurs clés à inclure dans la demande. + * ** Paramètres de requête URL **: Définissez les paires de valeurs clés qui seront annexées à l'URL en tant que paramètres de requête. + * ** Type de corps de demande **: Choisissez le format de la charge utile de demande si l'envoi de données - les options incluent`JSON`, `Raw text`, `Form Data`, ou`x-www-form-urlencoded`. + * ** Demande Body **: Fournissez la charge utile de données réelle pour des méthodes comme le poste ou le put. Le format doit correspondre au sélectionné`Body Type`et les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. 
+ * ** Type de réponse **: Spécifiez comment le flux de travail doit interpréter la réponse reçue du serveur - les options incluent`JSON`, `Text`, `Array Buffer`, ou`Base64`pour les données binaires. +* ** Entrées: ** reçoit des données de configuration telles que l'URL, la méthode, les en-têtes et le corps, incorporant souvent des valeurs dynamiques à partir d'étapes de work`$flow.state`. +* ** Sorties: ** produit la réponse reçue du serveur externe, analysé selon le sélectionné`Response Type`. + +
    + +*** + +### ** 7. Node de condition ** + +Implémente la logique de ramification déterministe dans le flux de travail sur la base des règles définies. + +* ** Fonctionnalité: ** Ce nœud agit comme un point de décision, évaluant une ou plusieurs conditions spécifiées pour diriger le flux de travail dans différents chemins. Il compare les valeurs d'entrée - qui peuvent être des chaînes, des nombres ou des booléens - en utilisant une variété d'opérateurs logiques, tels que les égaux, contient, supérieur ou vide. Sur la base de la question de savoir si ces conditions évaluent en vrai ou fausse, l'exécution du flux de travail passe le long de l'une des branches de sortie distinctes connectées à ce nœud. +* ** Paramètres de configuration ** + * ** Conditions **: Configurez l'ensemble des règles logiques que le nœud évaluera. + * ** Type **: Spécifiez le type de données comparées pour cette règle -`String`, `Number`, ou`Boolean`. + * ** Valeur 1 **: Définissez la première valeur pour la comparaison. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Opération **: Sélectionnez l'opérateur logique à appliquer entre la valeur 1 et la valeur 2 - par exemple,`equal`, `notEqual`, `contains`, `larger`, `isEmpty`. + * ** Valeur 2 **: Définissez la deuxième valeur pour la comparaison, si nécessaire par l'opération choisie. Les données dynamiques peuvent également être insérées ici en utilisant`{{ variables }}`. +* ** Entrées: ** nécessite les données pour`Value 1`et`Value 2`pour chaque condition évaluée. Ces valeurs sont fournies à partir des sorties de nœud précédentes ou récupérées à partir de`$flow.state`. +* ** Sorties: ** fournit plusieurs ancres de sortie, correspondant au résultat booléen (vrai / faux) des conditions évaluées. Le flux de travail continue le long du chemin spécifique connecté à l'ancre de sortie qui correspond au résultat. + +
    + +*** + +### ** 8. Node d'agent de condition ** + +Fournit une ramification dynamique basée sur l'IA basée sur les instructions et le contexte du langage naturel. + +* ** Fonctionnalité: ** Ce nœud utilise un modèle grand langage (LLM) pour acheminer le workflow. Les analyses des analyses ont fourni des données d'entrée par rapport à un ensemble de "scénarios" définis par l'utilisateur - résultats ou catégories potentiels - guidés par des "instructions" de langage naturel de haut niveau qui définissent la tâche de prise de décision. Le LLM détermine ensuite quel scénario correspond le mieux au contexte d'entrée actuel. Sur la base de cette classification dirigée par l'IA, l'exécution du flux de travail réduit le chemin de sortie spécifique correspondant au scénario choisi. Ce nœud est particulièrement utile pour les tâches telles que la reconnaissance de l'intention des utilisateurs, le routage conditionnel complexe ou la prise de décision situationnelle nuancée où des règles simples et prédéfinies - comme dans le nœud de condition - sont insuffisantes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi qui effectuera l'analyse et la classification des scénarios. + * ** Instructions **: Définissez l'objectif ou la tâche globale du LLM en langage naturel - par exemple, "Déterminez si la demande de l'utilisateur concerne les ventes, le support ou la demande générale." + * ** Entrée **: spécifiez les données, souvent du texte à partir d'une étape précédente ou d'une entrée utilisateur, en utilisant`{{ variables }}`, que le LLM analysera pour prendre sa décision de routage. + * ** Scénarios **: Configurer un tableau définissant les résultats possibles ou les chemins distincts que le flux de travail peut prendre. Chaque scénario est décrit dans le langage naturel - par exemple, «Enquête sur les ventes», «demande de support», «question générale» - et chacune correspond à une ancre de sortie unique sur le nœud. 
+* ** Entrées: ** nécessite le`Input`Données pour l'analyse et le`Instructions`Pour guider le LLM. +* ** sorties: ** fournit plusieurs ancres de sortie, une pour chaque définie`Scenario`. Le workflow continue le long du chemin spécifique connecté à l'ancre de sortie que le LLM détermine le meilleur correspond à l'entrée. + +
    + +*** + +### ** 9. Node d'itération ** + +Exécute un "sous-flux" défini - une séquence de nœuds imbriqués - pour chaque élément d'un tableau d'entrée, implémentant une boucle "for-een". + +* ** Fonctionnalité: ** Ce nœud est conçu pour le traitement des collections de données. Il prend un tableau, fourni directement ou référencé via une variable, comme entrée. Pour chaque élément individuel à l'intérieur de ce tableau, le nœud d'itération exécute séquentiellement la séquence d'autres nœuds qui sont visuellement placés à l'intérieur de ses limites sur la toile. +* ** Paramètres de configuration ** + * ** Entrée du tableau **: Spécifie le tableau d'entrée que le nœud iratera. Ceci est fourni en faisant référence à une variable qui contient un tableau à partir de la sortie d'un nœud précédent ou du`$flow.state`- par exemple,`{{ $flow.state.itemList }}`. +* ** Entrées: ** nécessite un tableau à fournir à son`Array Input`paramètre. +* ** Sorties: ** Fournit une seule anage de sortie qui ne devient active qu'après que le sous-flux imbriqué a terminé l'exécution pour tous les éléments du tableau d'entrée. Les données transmises par cette sortie peuvent inclure des résultats agrégés ou l'état final des variables modifié dans la boucle, selon la conception du sous-flux. Les nœuds placés à l'intérieur du bloc d'itération ont leurs propres connexions d'entrée et de sortie distinctes qui définissent la séquence d'opérations pour chaque élément. + +
    + +*** + +### ** 10. Node de boucle ** + +Redirige explicitement l'exécution du workflow vers un nœud précédemment exécuté. + +* ** Fonctionnalité: ** Ce nœud permet la création de cycles ou de tentatives itératives dans un workflow. Lorsque le flux d'exécution atteint le nœud de boucle, il n'atteint pas un nouveau nœud; Au lieu de cela, il "remonte" à un nœud cible spécifié qui a déjà été exécuté plus tôt dans l'exécution actuelle du flux de travail. Cette action provoque la réexécution de ce nœud cible et de tous les nœuds suivants dans cette partie de l'écoulement. +* ** Paramètres de configuration ** + * ** Loop Retour à **: Sélectionne l'ID unique d'un nœud précédemment exécuté dans le flux de travail actuel auquel l'exécution doit retourner. + * ** MAX LOOP COUNT **: Définit le nombre maximal de fois que cette opération de boucle peut être effectuée dans une seule exécution de workflow, sauvegarde contre les cycles infinis. La valeur par défaut est 5. +* ** Entrées: ** Reçoit le signal d'exécution pour activer. Il suit en interne le nombre de fois que la boucle s'est produite pour l'exécution actuelle. +* ** Sorties: ** Ce nœud n'a pas d'ancre de sortie standard à pointant, car sa fonction principale est de rediriger le flux d'exécution vers l'arrière vers le`Loop Back To`Node cible, d'où le flux de travail continue alors. + +
    + +*** + +### ** 11. Node d'entrée humain ** + +Utilise l'exécution du workflow pour demander des entrées, une approbation ou des commentaires explicites d'un utilisateur humain - un composant clé pour les processus humains dans la boucle (HITL). + +* ** Fonctionnalité: ** Ce nœud arrête la progression automatisée du flux de travail et présente des informations ou une question à un utilisateur humain, via l'interface de chat. Le contenu affiché à l'utilisateur peut être un texte statique prédéfini ou généré dynamiquement par un LLM basé sur le contexte de workflow actuel. L'utilisateur reçoit des choix d'action distincts - par exemple, «procéder», «rejeter» - et, s'il est activé, un champ pour fournir des commentaires textuels. Une fois que l'utilisateur fait une sélection et soumet sa réponse, le flux de travail reprend l'exécution le long du chemin de sortie spécifique correspondant à son action choisie. +* ** Paramètres de configuration ** + * ** Type de description **: détermine comment le message ou la question présentée à l'utilisateur est généré - soit`Fixed`(texte statique) ou`Dynamic`(généré par un LLM). + * ** Si le type de description est`Fixed`** + * ** Description **: Ce champ contient le texte exact à afficher à l'utilisateur. Il prend en charge l'insertion de données dynamiques en utilisant`{{ variables }}` + * **Si`Description Type`est`Dynamic`** + * ** Modèle **: sélectionne le modèle AI dans un service choisi qui générera le message orienté utilisateur. + * ** invite **: fournit les instructions ou l'invite pour le LLM sélectionné pour générer le message affiché à l'utilisateur. + * ** Feedback: ** Si activé, l'utilisateur sera invité avec une fenêtre de rétroaction pour laisser ses commentaires, et ces commentaires seront annexés à la sortie du nœud. +* ** Entrées: ** Reçoit le signal d'exécution pour suspendre le workflow. 
Il peut utiliser les données des étapes précédentes ou`$flow.state`à travers des variables dans le`Description`ou`Prompt`champs s'ils sont configurés pour le contenu dynamique. +* ** Sorties: ** Fournit deux ancres de sortie, chacune correspondant à une action utilisateur distincte - une ancre pour "procéder" et une autre pour "rejeter". Le flux de travail continue le long du chemin connecté à l'ancre correspondant à la sélection de l'utilisateur. + +
    + +*** + +### ** 12. Node de réponse directe ** + +Envoie un message final à l'utilisateur et termine le chemin d'exécution actuel. + +* ** Fonctionnalité: ** Ce nœud sert de point de terminaison pour une branche spécifique ou l'intégralité d'un workflow. Il prend un message configuré - qui peut être du texte statique ou du contenu dynamique d'une variable - et le livre directement à l'utilisateur final via l'interface de chat. Lors de l'envoi de ce message, l'exécution le long de ce chemin particulier du workflow conclut; Aucun autre nœud connecté à partir de ce point ne sera traité. +* ** Paramètres de configuration ** + * ** Message **: Définissez le texte ou la variable`{{ variable }}`Cela contient le contenu à envoyer comme réponse finale à l'utilisateur. +* ** Entrées: ** reçoit le contenu du message, qui provient de la sortie d'un nœud précédent ou d'une valeur stockée dans`$flow.state`. +* ** Sorties: ** Ce nœud n'a pas d'ancres de sortie, car sa fonction est de terminer le chemin d'exécution après avoir envoyé la réponse. + +
    + +*** + +### ** 13. Node de fonction personnalisé ** + +Fournit un mécanisme pour exécuter le code JavaScript côté serveur personnalisé dans le workflow. + +* ** Fonctionnalité: ** Ce nœud permet d'écrire et d'exécuter des extraits arbitraires JavaScript, offrant un moyen efficace d'implémenter des transformations de données complexes, une logique métier sur mesure ou des interactions avec des ressources non directement prises en charge par d'autres nœuds standard. Le code exécuté fonctionne dans un environnement Node.js et a des moyens spécifiques d'accéder aux données: + * ** Variables d'entrée: ** Valeurs passées via le`Input Variables`La configuration est accessible dans la fonction, généralement préfixée avec`--- +description: Learn how to build multi-agents system using Agentflow V2, written by @toi500 +--- + +# Agentflow v2 + +Ce guide explore l'architecture AgentFlow V2, détaillant ses concepts principaux, ses cas d'utilisation, son état de flux et ses références de nœuds complets. + +{% hint style = "avertissement"%} +** Avis de non-responsabilité: ** Cette documentation décrit AgentFlow V2 à sa version officielle actuelle. Les fonctionnalités, les fonctionnalités et les paramètres de nœud sont soumis à un changement dans les futures mises à jour et versions de Flowise. Veuillez vous référer aux dernières notes de publication officielle ou à des informations sur l'application pour les détails les plus récents. +{% EndHint%} + +{% embed url = "https://youtu.be/-h4wquzrhhi?si=jkhuefiw06ao6ge"%} + +## Concept de base + +AgentFlow V2 représente une évolution architecturale significative, introduisant un nouveau paradigme en flux qui se concentre sur une orchestration explicite du flux de travail et une flexibilité accrue. 
Contrairement à la dépendance principale de V1 sur les cadres externes pour sa logique de graphique d'agent de base, V2 déplace l'attention de la conception de l'ensemble du flux de travail en utilisant un ensemble granulaire de nœuds autonomes spécialisés développés nativement en tant que composants coulissants principaux. + +Dans cette architecture V2, chaque nœud fonctionne comme une unité indépendante, exécutant une opération discrète en fonction de sa conception et de sa configuration spécifiques. Les connexions visuelles entre les nœuds de la canevas définissent explicitement le chemin de travail et la séquence de contrôle du workflow, les données peuvent être transmises entre les nœuds en faisant référence aux sorties de tout nœud précédemment exécuté dans le flux actuel, et l'état de flux fournit un mécanisme explicite pour gérer et partager des données tout au long du flux de travail. + +L'architecture V2 met en œuvre un système complet de la dépendance aux nœuds et de la file d'attente d'exécution qui respecte précisément ces voies définies tout en maintenant une séparation claire entre les composants, permettant aux flux de travail de devenir à la fois plus sophistiqués et plus faciles à concevoir. Cela permet aux modèles complexes comme les boucles, la ramification conditionnelle, les interactions humaines dans la boucle et d'autres à être réalisables. Cela le rend plus adaptable à divers cas d'utilisation tout en restant plus maintenable et extensible. + +
    + +## Différence entre AgentFlow et Plateforme d'automatisation + +L'une des questions les plus posées: quelle est la différence entre AgentFlow et les plates-formes d'automatisation comme N8N, Make ou Zapier? + +### 💬 ** Communication d'agent à agent ** + +La communication multimodale entre les agents est prise en charge. Un agent de superviseur peut formuler et déléguer des tâches à plusieurs agents de travailleurs, avec des sorties des agents des travailleurs retournés par la suite au superviseur. + +À chaque étape, les agents ont accès à l'historique complet de la conversation, permettant au superviseur de déterminer la tâche suivante et les agents des travailleurs pour interpréter la tâche, sélectionner les outils appropriés et exécuter les actions en conséquence. + +Cette architecture permet ** la collaboration, la délégation et la gestion des tâches partagées ** sur plusieurs agents, ces capacités ne sont généralement pas offertes par les outils d'automatisation traditionnels. + +
    + +### 🙋‍ Human dans la boucle + +L'exécution est interrompue en attendant l'entrée humaine, sans bloquer le thread en cours d'exécution. Chaque point de contrôle est enregistré, permettant au flux de travail de reprendre à partir du même point même après un redémarrage de l'application. + +L'utilisation de points de contrôle permet ** les agents de longue durée et avec état **. + +Les agents peuvent également être configurés pour ** demander l'autorisation avant d'exécuter des outils **, similaire à la façon dont Claude demande l'approbation de l'utilisateur avant d'utiliser les outils MCP. Cela permet d'empêcher l'exécution autonome d'actions sensibles sans l'approbation explicite de l'utilisateur. + +
    + +### 📖 État partagé + +L'état partagé permet l'échange de données entre les agents, particulièrement utile pour passer des données entre les branches ou les étapes non adjacentes d'un flux. Se référer à[#understanding-flow-state](agentflowv2.md#understanding-flow-state "mention") + +### ⚡ Streaming + +Prend en charge les événements de serveur (SSE) pour le streaming en temps réel de réponses LLM ou d'agent. Le streaming permet également aux mises à jour de l'abonnement à l'exécution au fur et à mesure que le flux de travail progresse. + +
    + +### 🌐 outils MCP + +Alors que les plates-formes d'automatisation traditionnelles présentent souvent de vastes bibliothèques d'intégrations prédéfinies, AgentFlow permet à MCP ([Model Context Protocol](https://github.com/modelcontextprotocol)) outils à connecter dans le cadre du flux de travail, plutôt que de fonctionner uniquement en tant qu'outils d'agent. + +Les MCP personnalisés peuvent également être créés indépendamment, sans dépendre des intégrations fournies par la plate-forme. MCP est largement considéré comme une norme de l'industrie et est généralement soutenu et maintenu par les prestataires officiels. Par exemple, le GitHub MCP est développé et maintenu par l'équipe GitHub, avec un soutien similaire fourni pour Atlassian Jira, Brave Search, et autres. + +
    + +## Référence du nœud AgentFlow V2 + +Cette section fournit une référence détaillée pour chaque nœud disponible, décrivant son objectif spécifique, les paramètres de configuration des clés, les entrées attendues, les sorties générées et son rôle dans l'architecture AgentFlow V2. + + + +*** + +### ** 1. Démarrer le nœud ** + +Le point d'entrée désigné pour lancer n'importe quelle exécution de workflow AgentFlow V2. Chaque flux doit commencer par ce nœud. + +* ** Fonctionnalité: ** Définit comment le flux de travail est déclenché et configure les conditions initiales. Il peut accepter les entrées directement à partir de l'interface de chat ou via un formulaire personnalisable présenté à l'utilisateur. Il permet également l'initialisation de`Flow State`Variables au début de l'exécution et peut gérer la façon dont la mémoire de conversation est gérée pour l'exécution. +* ** Paramètres de configuration ** + * ** Type d'entrée **: détermine comment l'exécution du flux de travail est initiée, soit par`Chat Input`de l'utilisateur ou via un soumis`Form Input`. + * ** Titre du formulaire, description du formulaire, types d'entrée de formulaire **: If`Form Input`est sélectionné, ces champs configurent l'apparence du formulaire présenté à l'utilisateur, permettant divers types de champs de saisie avec des étiquettes définies et des noms de variables. + * ** Mémoire éphémère **: si elle est activée, demande au workflow de commencer l'exécution sans considérer les messages passés du thread de conversation, en commençant efficacement par une ardoise de mémoire propre. + * ** État de flux **: définit l'ensemble complet des paires de valeurs clés initiales pour l'état d'exécution du workflow`$flow.state`. Toutes les clés d'état qui seront utilisées ou mises à jour par les nœuds suivantes doivent être déclarées et initialisées ici. 
+* ** Entrées: ** Reçoit les données initiales qui déclenchent le workflow, qui sera soit un message de chat, soit les données soumises via un formulaire. +* ** Sorties: ** Fournit une seule ancre de sortie pour se connecter au premier nœud opérationnel, passant les données d'entrée initiales et l'état de flux initialisé. + +
    + +*** + +### ** 2. Node LLM ** + +Fournit un accès direct à un modèle de grande langue (LLM) configuré pour exécuter des tâches AI, permettant au workflow d'effectuer une extraction structurée de données si nécessaire. + +* ** Fonctionnalité: ** Ce nœud envoie des demandes à un LLM basé sur des instructions (messages) et un contexte fourni. Il peut être utilisé pour la génération de texte, le résumé, la traduction, l'analyse, la réponse aux questions et la génération de sortie JSON structurée selon un schéma défini. Il a accès à la mémoire pour le thread de conversation et peut lire / écrire`Flow State`. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O d'OpenAI ou Google Gemini. + * ** Messages **: Définissez l'entrée conversationnelle pour le LLM, en la structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la réponse de l'IA. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Mémoire **: Si vous activez, détermine si le LLM doit considérer l'historique du thread de conversation actuel lors de la génération de sa réponse. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté au LLM - par exemple, s'il faut inclure tous les messages, seulement une fenêtre récente de virages ou une version résumée. + * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** Retour Response As **: Configure comment la sortie de LLM est classée - comme un`User Message`ou`Assistant Message`- qui peut influencer la façon dont il est géré par les systèmes de mémoire ou la journalisation ultérieurs. 
+ * ** Sortie structurée JSON **: Demande au LLM de formater sa sortie en fonction d'un schéma JSON spécifique - y compris des clés, des types de données et des descriptions - garantissant des données prévisibles et lisibles par la machine. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud LLM sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise des données du déclencheur initial du workflow ou des sorties des nœuds précédents, incorporant ces données dans le`Messages`ou`Input Message`champs. Il peut également récupérer des valeurs de`$flow.state`Lorsque les variables d'entrée le font référence. +* ** Sorties: ** produit la réponse de LLM, qui sera soit du texte brut, soit un objet JSON structuré. La catégorisation de cette sortie - en tant qu'utilisateur ou assistant - est déterminée par le`Return Response`paramètre. + +
    + +*** + +### ** 3. Node d'agent ** + +Représente une entité d'IA autonome capable de raisonner, de planifier et d'interagir avec des outils ou des sources de connaissances pour atteindre un objectif donné. + +* ** Fonctionnalité: ** Ce nœud utilise un LLM pour décider dynamiquement d'une séquence d'actions. En fonction de l'objectif de l'utilisateur - fourni via des messages / entrées - il peut choisir d'utiliser des outils disponibles ou des magasins de documents de requête pour recueillir des informations ou effectuer des actions. Il gère son propre cycle de raisonnement et peut utiliser la mémoire pour le fil de conversation et`Flow State`. Convient aux tâches nécessitant un raisonnement en plusieurs étapes ou interagissant dynamiquement avec des systèmes ou des outils externes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O ou Google Gemini d'OpenAI - qui conduira les processus de raisonnement et de prise de décision de l'agent. + * ** Messages **: Définissez l'entrée conversationnelle initiale, l'objectif ou le contexte pour l'agent, en le structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la compréhension de l'agent et les actions ultérieures. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Outils **: Spécifiez quels outils fluide prédéfinis l'agent est autorisé à utiliser pour atteindre ses objectifs. + * Pour chaque outil sélectionné, un ** facultatif ** nécessite un indicateur d'entrée humain ** indique si l'opération de l'outil peut elle-même s'arrêter pour demander une intervention humaine. + * ** Magasins de connaissances / documents **: Configurer l'accès aux informations dans les magasins de documents gérés par flux. + * ** Magasin de documents **: Choisissez une boutique de documents préconfigurée à partir de laquelle l'agent peut récupérer des informations. 
Ces magasins doivent être mis en place et peuplés à l'avance. + * **Décrire les connaissances**: Fournir une description en langage naturel du contenu et du but de ce magasin de documents. Cette description guide l'agent pour comprendre quel type d'informations le magasin contient et quand il serait approprié de les interroger. + * **Connaissances / Vector intégrés**: Configurez l'accès aux magasins de vecteurs externes et préexistants comme sources de connaissances supplémentaires pour l'agent. + * **Vector Store**: Sélectionne la base de données vectorielle spécifique et préconfigurée que l'agent peut interroger. + * **Modèle d'intégration**: Spécifie le modèle d'intégration associé au magasin vectoriel sélectionné, assurant la compatibilité des requêtes. + * **Nom de la connaissance**: attribue un court nom descriptif à cette source de connaissances basée sur un vecteur, que l'agent peut utiliser pour référence. + * **Décrire les connaissances**: Fournir une description en langage naturel du contenu et du but de ce magasin vectoriel, en guidant l'agent sur quand et comment utiliser cette source de connaissances spécifique. + * **Retourner les documents sources**: Si cette option est activée, demande à l'agent d'inclure des informations sur les documents source avec les données récupérées du magasin vectoriel. + * **Mémoire**: Si cette option est activée, détermine si l'agent doit considérer l'historique du thread de conversation actuel lors de la prise de décisions et de la génération de réponses. + * **Type de mémoire, taille de la fenêtre, limite de jeton maximale**: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté à l'agent - par exemple, que ce soit pour inclure tous les messages, seulement une fenêtre récente ou une version résumée. 
+ * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** RETOUR RÉPONSE **: Configure comment la sortie ou le message final de l'agent est classé - en tant que message utilisateur ou message assistant - qui peut influencer la façon dont il est géré par des systèmes de mémoire ultérieurs ou la journalisation. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud d'agent sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise les données du déclencheur initial du workflow ou des sorties des nœuds précédents, souvent incorporés dans le`Messages`ou`Input Message`champs. Il accède aux outils configurés et aux sources de connaissances selon les besoins. +* ** Sorties: ** produit le résultat ou la réponse finale générée par l'agent une fois qu'il a terminé son raisonnement, sa planification et toute interaction avec des outils ou des sources de connaissances. + +
    + +*** + +### ** 4. Node d'outil ** + +Fournit un mécanisme pour exécuter directement et de manière déterministe un outil fluide spécifique et prédéfini dans la séquence de workflow. Contrairement au nœud d'agent, où le LLM choisit dynamiquement un outil basé sur le raisonnement, le nœud d'outil exécute exactement l'outil sélectionné par le concepteur de workflow pendant la configuration. + +* ** Fonctionnalité: ** Ce nœud est utilisé lorsque le workflow nécessite l'exécution d'une capacité spécifique connue à un point défini, avec des entrées facilement disponibles. Il garantit une action déterministe sans impliquer le raisonnement LLM pour la sélection des outils. +* ** Comment ça marche ** + 1. ** TRANGERS: ** Lorsque l'exécution du workflow atteint un nœud d'outil, il s'active. + 2. ** Identification de l'outil: ** Il identifie l'outil de flux spécifique sélectionné dans sa configuration. + 3. ** Résolution de l'argument d'entrée: ** Il examine la configuration des arguments d'entrée de l'outil. Pour chaque paramètre d'entrée requis de l'outil sélectionné. + 4. ** Exécution: ** Il invoque le code sous-jacent ou l'appel API associé à l'outil Flowise sélectionné, passant les arguments d'entrée résolus. + 5. ** Génération de sortie: ** Il reçoit le résultat renvoyé par l'exécution de l'outil. + 6. ** Propagation de sortie: ** Il rend ce résultat disponible via son ancre de sortie pour les nœuds suivants. +* ** Paramètres de configuration ** + * ** Sélection d'outils **: Choisissez l'outil Flowise spécifique et enregistré que ce nœud exécutera à partir d'une liste déroulante. + * ** Arguments d'entrée **: Définissez comment les données de votre flux de travail sont fournies à l'outil sélectionné. 
Cette section s'adapte dynamiquement en fonction de l'outil choisi, présentant ses paramètres d'entrée spécifiques: + * ** Nom de l'argument de la carte **: Pour chaque entrée, l'outil sélectionné nécessite (par exemple,`input`Pour une calculatrice), ce champ affichera le nom du paramètre attendu tel que défini par l'outil lui-même. + * ** Fournir une valeur d'argument **: Définissez la valeur de ce paramètre correspondant, en utilisant une variable dynamique comme`{{ previousNode.output }}`, `{{ $flow.state.someKey }}`, ou en entrant un texte statique. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de cet outil sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** reçoit les données nécessaires pour les arguments de l'outil via le`Input Arguments`mappage, valeurs d'approvisionnement à partir des sorties de nœud précédentes,`$flow.state`ou configurations statiques. +* ** Sorties: ** produit la sortie brute générée par l'outil exécuté - par exemple, une chaîne JSON à partir d'une API, un résultat de texte ou une valeur numérique. + +
    + +*** + +### ** 5. Retriever Node ** + +Effectue une récupération d'informations ciblée à partir des magasins de documents configurés. + +* ** Fonctionnalité: ** Ce nœud interroge un ou plusieurs magasins de documents spécifiés, récupérant des morceaux de document pertinents basés sur la similitude sémantique. C'est une alternative ciblée à l'utilisation d'un nœud d'agent lorsque la seule action requise est la récupération et la sélection des outils dynamiques par un LLM n'est pas nécessaire. +* ** Paramètres de configuration ** + * ** Magasins de connaissances / documents **: Spécifiez quel (s) magasin de documents préconfigurés et peuplés, ce nœud doit interroger pour trouver des informations pertinentes. + * ** Retriever Query **: Définissez la requête texte qui sera utilisée pour rechercher les magasins de documents sélectionnés. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Format de sortie **: Choisissez comment les informations récupérées doivent être présentées - soit comme simple`Text`ou comme`Text with Metadata`, qui peut inclure des détails tels que les noms de documents source ou les emplacements. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud Retriever sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** nécessite une chaîne de requête - souvent fournie comme une variable à partir d'une étape précédente ou d'une entrée utilisateur - et accède aux magasins de documents sélectionnés pour plus d'informations. +* ** sorties: ** produit les morceaux de document récupérés de la base de connaissances, formaté selon les choisis`Output Format`. + +
    + +*** + +### 6. nœud http + +Facilite la communication directe avec les services Web externes et les API via le protocole de transfert hypertexte (HTTP). + +* ** Fonctionnalité: ** Ce nœud permet au workflow d'interagir avec tout système externe accessible via HTTP. Il peut envoyer différents types de demandes (obtenir, publier, mettre, supprimer, patcher) à une URL spécifiée, permettant une intégration avec des API tierces, récupérer des données à partir de ressources Web ou déclencher des webhooks externes. Le nœud prend en charge la configuration des méthodes d'authentification, des en-têtes personnalisés, des paramètres de requête et différents types de corps de demande pour répondre aux diverses exigences d'API. +* ** Paramètres de configuration ** + * ** HTTP Idedential **: Sélectionnez éventuellement des informations d'identification préconfigurées - telles que l'authentification de base, le jeton de support ou la clé API - pour authentifier les demandes au service cible. + * ** Méthode de la demande **: Spécifiez la méthode HTTP à utiliser pour la demande - par exemple,`GET`, `POST`, `PUT`, `DELETE`, `PATCH`. + * ** URL cible **: Définissez l'URL complète du point de terminaison externe auquel la demande sera envoyée. + * ** En-têtes de demande **: Définissez tous les en-têtes HTTP nécessaires en paires de valeurs clés à inclure dans la demande. + * ** Paramètres de requête URL **: Définissez les paires de valeurs clés qui seront annexées à l'URL en tant que paramètres de requête. + * ** Type de corps de demande **: Choisissez le format de la charge utile de demande si l'envoi de données - les options incluent`JSON`, `Raw text`, `Form Data`, ou`x-www-form-urlencoded`. + * ** Demande Body **: Fournissez la charge utile de données réelle pour des méthodes comme le poste ou le put. Le format doit correspondre au sélectionné`Body Type`et les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. 
+
+  * **Type de réponse** : Spécifiez comment le flux de travail doit interpréter la réponse reçue du serveur — les options incluent `JSON`, `Text`, `Array Buffer` ou `Base64` pour les données binaires.
+* **Entrées :** reçoit des données de configuration telles que l'URL, la méthode, les en-têtes et le corps, incorporant souvent des valeurs dynamiques provenant d'étapes précédentes du workflow ou de `$flow.state`.
+* **Sorties :** produit la réponse reçue du serveur externe, analysée selon le `Response Type` sélectionné.
+
    + +*** + +### ** 7. Node de condition ** + +Implémente la logique de ramification déterministe dans le flux de travail sur la base des règles définies. + +* ** Fonctionnalité: ** Ce nœud agit comme un point de décision, évaluant une ou plusieurs conditions spécifiées pour diriger le flux de travail dans différents chemins. Il compare les valeurs d'entrée - qui peuvent être des chaînes, des nombres ou des booléens - en utilisant une variété d'opérateurs logiques, tels que les égaux, contient, supérieur ou vide. Sur la base de la question de savoir si ces conditions évaluent en vrai ou fausse, l'exécution du flux de travail passe le long de l'une des branches de sortie distinctes connectées à ce nœud. +* ** Paramètres de configuration ** + * ** Conditions **: Configurez l'ensemble des règles logiques que le nœud évaluera. + * ** Type **: Spécifiez le type de données comparées pour cette règle -`String`, `Number`, ou`Boolean`. + * ** Valeur 1 **: Définissez la première valeur pour la comparaison. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Opération **: Sélectionnez l'opérateur logique à appliquer entre la valeur 1 et la valeur 2 - par exemple,`equal`, `notEqual`, `contains`, `larger`, `isEmpty`. + * ** Valeur 2 **: Définissez la deuxième valeur pour la comparaison, si nécessaire par l'opération choisie. Les données dynamiques peuvent également être insérées ici en utilisant`{{ variables }}`. +* ** Entrées: ** nécessite les données pour`Value 1`et`Value 2`pour chaque condition évaluée. Ces valeurs sont fournies à partir des sorties de nœud précédentes ou récupérées à partir de`$flow.state`. +* ** Sorties: ** fournit plusieurs ancres de sortie, correspondant au résultat booléen (vrai / faux) des conditions évaluées. Le flux de travail continue le long du chemin spécifique connecté à l'ancre de sortie qui correspond au résultat. + +
    + +*** + +### ** 8. Node d'agent de condition ** + +Fournit une ramification dynamique basée sur l'IA basée sur les instructions et le contexte du langage naturel. + +* ** Fonctionnalité: ** Ce nœud utilise un modèle grand langage (LLM) pour acheminer le workflow. Les analyses des analyses ont fourni des données d'entrée par rapport à un ensemble de "scénarios" définis par l'utilisateur - résultats ou catégories potentiels - guidés par des "instructions" de langage naturel de haut niveau qui définissent la tâche de prise de décision. Le LLM détermine ensuite quel scénario correspond le mieux au contexte d'entrée actuel. Sur la base de cette classification dirigée par l'IA, l'exécution du flux de travail réduit le chemin de sortie spécifique correspondant au scénario choisi. Ce nœud est particulièrement utile pour les tâches telles que la reconnaissance de l'intention des utilisateurs, le routage conditionnel complexe ou la prise de décision situationnelle nuancée où des règles simples et prédéfinies - comme dans le nœud de condition - sont insuffisantes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi qui effectuera l'analyse et la classification des scénarios. + * ** Instructions **: Définissez l'objectif ou la tâche globale du LLM en langage naturel - par exemple, "Déterminez si la demande de l'utilisateur concerne les ventes, le support ou la demande générale." + * ** Entrée **: spécifiez les données, souvent du texte à partir d'une étape précédente ou d'une entrée utilisateur, en utilisant`{{ variables }}`, que le LLM analysera pour prendre sa décision de routage. + * ** Scénarios **: Configurer un tableau définissant les résultats possibles ou les chemins distincts que le flux de travail peut prendre. Chaque scénario est décrit dans le langage naturel - par exemple, «Enquête sur les ventes», «demande de support», «question générale» - et chacune correspond à une ancre de sortie unique sur le nœud. 
+
+* **Entrées :** nécessite les données d'`Input` pour l'analyse et les `Instructions` pour guider le LLM.
+* **Sorties :** fournit plusieurs ancres de sortie, une pour chaque `Scenario` défini. Le workflow continue le long du chemin spécifique connecté à l'ancre de sortie dont le LLM détermine qu'elle correspond le mieux à l'entrée.
+
    + +*** + +### ** 9. Node d'itération ** + +Exécute un "sous-flux" défini - une séquence de nœuds imbriqués - pour chaque élément d'un tableau d'entrée, implémentant une boucle "for-een". + +* ** Fonctionnalité: ** Ce nœud est conçu pour le traitement des collections de données. Il prend un tableau, fourni directement ou référencé via une variable, comme entrée. Pour chaque élément individuel à l'intérieur de ce tableau, le nœud d'itération exécute séquentiellement la séquence d'autres nœuds qui sont visuellement placés à l'intérieur de ses limites sur la toile. +* ** Paramètres de configuration ** + * ** Entrée du tableau **: Spécifie le tableau d'entrée que le nœud iratera. Ceci est fourni en faisant référence à une variable qui contient un tableau à partir de la sortie d'un nœud précédent ou du`$flow.state`- par exemple,`{{ $flow.state.itemList }}`. +* ** Entrées: ** nécessite un tableau à fournir à son`Array Input`paramètre. +* ** Sorties: ** Fournit une seule anage de sortie qui ne devient active qu'après que le sous-flux imbriqué a terminé l'exécution pour tous les éléments du tableau d'entrée. Les données transmises par cette sortie peuvent inclure des résultats agrégés ou l'état final des variables modifié dans la boucle, selon la conception du sous-flux. Les nœuds placés à l'intérieur du bloc d'itération ont leurs propres connexions d'entrée et de sortie distinctes qui définissent la séquence d'opérations pour chaque élément. + +
    + +*** + +### ** 10. Node de boucle ** + +Redirige explicitement l'exécution du workflow vers un nœud précédemment exécuté. + +* ** Fonctionnalité: ** Ce nœud permet la création de cycles ou de tentatives itératives dans un workflow. Lorsque le flux d'exécution atteint le nœud de boucle, il n'atteint pas un nouveau nœud; Au lieu de cela, il "remonte" à un nœud cible spécifié qui a déjà été exécuté plus tôt dans l'exécution actuelle du flux de travail. Cette action provoque la réexécution de ce nœud cible et de tous les nœuds suivants dans cette partie de l'écoulement. +* ** Paramètres de configuration ** + * ** Loop Retour à **: Sélectionne l'ID unique d'un nœud précédemment exécuté dans le flux de travail actuel auquel l'exécution doit retourner. + * ** MAX LOOP COUNT **: Définit le nombre maximal de fois que cette opération de boucle peut être effectuée dans une seule exécution de workflow, sauvegarde contre les cycles infinis. La valeur par défaut est 5. +* ** Entrées: ** Reçoit le signal d'exécution pour activer. Il suit en interne le nombre de fois que la boucle s'est produite pour l'exécution actuelle. +* ** Sorties: ** Ce nœud n'a pas d'ancre de sortie standard à pointant, car sa fonction principale est de rediriger le flux d'exécution vers l'arrière vers le`Loop Back To`Node cible, d'où le flux de travail continue alors. + +
    + +*** + +### ** 11. Node d'entrée humain ** + +Utilise l'exécution du workflow pour demander des entrées, une approbation ou des commentaires explicites d'un utilisateur humain - un composant clé pour les processus humains dans la boucle (HITL). + +* ** Fonctionnalité: ** Ce nœud arrête la progression automatisée du flux de travail et présente des informations ou une question à un utilisateur humain, via l'interface de chat. Le contenu affiché à l'utilisateur peut être un texte statique prédéfini ou généré dynamiquement par un LLM basé sur le contexte de workflow actuel. L'utilisateur reçoit des choix d'action distincts - par exemple, «procéder», «rejeter» - et, s'il est activé, un champ pour fournir des commentaires textuels. Une fois que l'utilisateur fait une sélection et soumet sa réponse, le flux de travail reprend l'exécution le long du chemin de sortie spécifique correspondant à son action choisie. +* ** Paramètres de configuration ** + * ** Type de description **: détermine comment le message ou la question présentée à l'utilisateur est généré - soit`Fixed`(texte statique) ou`Dynamic`(généré par un LLM). + * ** Si le type de description est`Fixed`** + * ** Description **: Ce champ contient le texte exact à afficher à l'utilisateur. Il prend en charge l'insertion de données dynamiques en utilisant`{{ variables }}` + * **Si`Description Type`est`Dynamic`** + * ** Modèle **: sélectionne le modèle AI dans un service choisi qui générera le message orienté utilisateur. + * ** invite **: fournit les instructions ou l'invite pour le LLM sélectionné pour générer le message affiché à l'utilisateur. + * ** Feedback: ** Si activé, l'utilisateur sera invité avec une fenêtre de rétroaction pour laisser ses commentaires, et ces commentaires seront annexés à la sortie du nœud. +* ** Entrées: ** Reçoit le signal d'exécution pour suspendre le workflow. 
Il peut utiliser les données des étapes précédentes ou `$flow.state` via des variables dans les champs `Description` ou `Prompt` s'ils sont configurés pour un contenu dynamique.
+* **Sorties :** Fournit deux ancres de sortie, chacune correspondant à une action utilisateur distincte — une ancre pour « procéder » et une autre pour « rejeter ». Le flux de travail continue le long du chemin connecté à l'ancre correspondant à la sélection de l'utilisateur.
+
    + +*** + +### ** 12. Node de réponse directe ** + +Envoie un message final à l'utilisateur et termine le chemin d'exécution actuel. + +* ** Fonctionnalité: ** Ce nœud sert de point de terminaison pour une branche spécifique ou l'intégralité d'un workflow. Il prend un message configuré - qui peut être du texte statique ou du contenu dynamique d'une variable - et le livre directement à l'utilisateur final via l'interface de chat. Lors de l'envoi de ce message, l'exécution le long de ce chemin particulier du workflow conclut; Aucun autre nœud connecté à partir de ce point ne sera traité. +* ** Paramètres de configuration ** + * ** Message **: Définissez le texte ou la variable`{{ variable }}`Cela contient le contenu à envoyer comme réponse finale à l'utilisateur. +* ** Entrées: ** reçoit le contenu du message, qui provient de la sortie d'un nœud précédent ou d'une valeur stockée dans`$flow.state`. +* ** Sorties: ** Ce nœud n'a pas d'ancres de sortie, car sa fonction est de terminer le chemin d'exécution après avoir envoyé la réponse. + +
    + +*** + +### ** 13. Node de fonction personnalisé ** + +Fournit un mécanisme pour exécuter le code JavaScript côté serveur personnalisé dans le workflow. + +* ** Fonctionnalité: ** Ce nœud permet d'écrire et d'exécuter des extraits arbitraires JavaScript, offrant un moyen efficace d'implémenter des transformations de données complexes, une logique métier sur mesure ou des interactions avec des ressources non directement prises en charge par d'autres nœuds standard. Le code exécuté fonctionne dans un environnement Node.js et a des moyens spécifiques d'accéder aux données: + * ** Variables d'entrée: ** Valeurs passées via le`Input Variables`La configuration est accessible dans la fonction, généralement préfixée avec- par exemple, si une variable d'entrée`userid`est défini, il est accessible comme`$userid`. + * ** Contexte de flux: ** Les variables de configuration de flux par défaut sont disponibles, telles que`$flow.sessionId`, `$flow.chatId`, `$flow.chatflowId`, `$flow.input`- l'entrée initiale qui a commencé le flux de travail - et l'ensemble`$flow.state`objet. + * ** Variables personnalisées: ** Toutes les variables personnalisées configurées dans Flowise - par exemple,`$vars.`. + * ** Bibliothèques: ** La fonction peut utiliser toutes les bibliothèques qui ont été importées et rendues disponibles dans l'environnement backend Flowise. ** La fonction doit renvoyer une valeur de chaîne à la fin de son exécution **. +* ** Paramètres de configuration ** + * ** Variables d'entrée **: Configurez un tableau de définitions d'entrée qui seront transmises sous forme de variables dans la portée de votre fonction JavaScript. 
Pour chaque variable que vous souhaitez définir, vous spécifierez: + * ** Nom de la variable **: le nom que vous utiliserez pour vous référer à cette variable dans votre code JavaScript, généralement préfixé avec un`--- +description: Learn how to build multi-agents system using Agentflow V2, written by @toi500 +--- + +# Agentflow v2 + +Ce guide explore l'architecture AgentFlow V2, détaillant ses concepts principaux, ses cas d'utilisation, son état de flux et ses références de nœuds complets. + +{% hint style = "avertissement"%} +** Avis de non-responsabilité: ** Cette documentation décrit AgentFlow V2 à sa version officielle actuelle. Les fonctionnalités, les fonctionnalités et les paramètres de nœud sont soumis à un changement dans les futures mises à jour et versions de Flowise. Veuillez vous référer aux dernières notes de publication officielle ou à des informations sur l'application pour les détails les plus récents. +{% EndHint%} + +{% embed url = "https://youtu.be/-h4wquzrhhi?si=jkhuefiw06ao6ge"%} + +## Concept de base + +AgentFlow V2 représente une évolution architecturale significative, introduisant un nouveau paradigme en flux qui se concentre sur une orchestration explicite du flux de travail et une flexibilité accrue. Contrairement à la dépendance principale de V1 sur les cadres externes pour sa logique de graphique d'agent de base, V2 déplace l'attention de la conception de l'ensemble du flux de travail en utilisant un ensemble granulaire de nœuds autonomes spécialisés développés nativement en tant que composants coulissants principaux. + +Dans cette architecture V2, chaque nœud fonctionne comme une unité indépendante, exécutant une opération discrète en fonction de sa conception et de sa configuration spécifiques. 
Les connexions visuelles entre les nœuds de la canevas définissent explicitement le chemin de travail et la séquence de contrôle du workflow, les données peuvent être transmises entre les nœuds en faisant référence aux sorties de tout nœud précédemment exécuté dans le flux actuel, et l'état de flux fournit un mécanisme explicite pour gérer et partager des données tout au long du flux de travail. + +L'architecture V2 met en œuvre un système complet de la dépendance aux nœuds et de la file d'attente d'exécution qui respecte précisément ces voies définies tout en maintenant une séparation claire entre les composants, permettant aux flux de travail de devenir à la fois plus sophistiqués et plus faciles à concevoir. Cela permet aux modèles complexes comme les boucles, la ramification conditionnelle, les interactions humaines dans la boucle et d'autres à être réalisables. Cela le rend plus adaptable à divers cas d'utilisation tout en restant plus maintenable et extensible. + +
    + +## Différence entre AgentFlow et Plateforme d'automatisation + +L'une des questions les plus posées: quelle est la différence entre AgentFlow et les plates-formes d'automatisation comme N8N, Make ou Zapier? + +### 💬 ** Communication d'agent à agent ** + +La communication multimodale entre les agents est prise en charge. Un agent de superviseur peut formuler et déléguer des tâches à plusieurs agents de travailleurs, avec des sorties des agents des travailleurs retournés par la suite au superviseur. + +À chaque étape, les agents ont accès à l'historique complet de la conversation, permettant au superviseur de déterminer la tâche suivante et les agents des travailleurs pour interpréter la tâche, sélectionner les outils appropriés et exécuter les actions en conséquence. + +Cette architecture permet ** la collaboration, la délégation et la gestion des tâches partagées ** sur plusieurs agents, ces capacités ne sont généralement pas offertes par les outils d'automatisation traditionnels. + +
    + +### 🙋‍ Human dans la boucle + +L'exécution est interrompue en attendant l'entrée humaine, sans bloquer le thread en cours d'exécution. Chaque point de contrôle est enregistré, permettant au flux de travail de reprendre à partir du même point même après un redémarrage de l'application. + +L'utilisation de points de contrôle permet ** les agents de longue durée et avec état **. + +Les agents peuvent également être configurés pour ** demander l'autorisation avant d'exécuter des outils **, similaire à la façon dont Claude demande l'approbation de l'utilisateur avant d'utiliser les outils MCP. Cela permet d'empêcher l'exécution autonome d'actions sensibles sans l'approbation explicite de l'utilisateur. + +
    +
+### 📖 État partagé
+
+L'état partagé permet l'échange de données entre les agents, ce qui est particulièrement utile pour passer des données entre des branches ou des étapes non adjacentes d'un flux. Se référer à [#understanding-flow-state](agentflowv2.md#understanding-flow-state "mention")
+
+### ⚡ Streaming
+
+Prend en charge les événements envoyés par le serveur (SSE) pour le streaming en temps réel des réponses du LLM ou de l'agent. Le streaming permet également de s'abonner aux mises à jour d'exécution au fur et à mesure que le flux de travail progresse.
+
    + +### 🌐 outils MCP + +Alors que les plates-formes d'automatisation traditionnelles présentent souvent de vastes bibliothèques d'intégrations prédéfinies, AgentFlow permet à MCP ([Model Context Protocol](https://github.com/modelcontextprotocol)) outils à connecter dans le cadre du flux de travail, plutôt que de fonctionner uniquement en tant qu'outils d'agent. + +Les MCP personnalisés peuvent également être créés indépendamment, sans dépendre des intégrations fournies par la plate-forme. MCP est largement considéré comme une norme de l'industrie et est généralement soutenu et maintenu par les prestataires officiels. Par exemple, le GitHub MCP est développé et maintenu par l'équipe GitHub, avec un soutien similaire fourni pour Atlassian Jira, Brave Search, et autres. + +
    + +## Référence du nœud AgentFlow V2 + +Cette section fournit une référence détaillée pour chaque nœud disponible, décrivant son objectif spécifique, les paramètres de configuration des clés, les entrées attendues, les sorties générées et son rôle dans l'architecture AgentFlow V2. + + + +*** + +### ** 1. Démarrer le nœud ** + +Le point d'entrée désigné pour lancer n'importe quelle exécution de workflow AgentFlow V2. Chaque flux doit commencer par ce nœud. + +* ** Fonctionnalité: ** Définit comment le flux de travail est déclenché et configure les conditions initiales. Il peut accepter les entrées directement à partir de l'interface de chat ou via un formulaire personnalisable présenté à l'utilisateur. Il permet également l'initialisation de`Flow State`Variables au début de l'exécution et peut gérer la façon dont la mémoire de conversation est gérée pour l'exécution. +* ** Paramètres de configuration ** + * ** Type d'entrée **: détermine comment l'exécution du flux de travail est initiée, soit par`Chat Input`de l'utilisateur ou via un soumis`Form Input`. + * ** Titre du formulaire, description du formulaire, types d'entrée de formulaire **: If`Form Input`est sélectionné, ces champs configurent l'apparence du formulaire présenté à l'utilisateur, permettant divers types de champs de saisie avec des étiquettes définies et des noms de variables. + * ** Mémoire éphémère **: si elle est activée, demande au workflow de commencer l'exécution sans considérer les messages passés du thread de conversation, en commençant efficacement par une ardoise de mémoire propre. + * ** État de flux **: définit l'ensemble complet des paires de valeurs clés initiales pour l'état d'exécution du workflow`$flow.state`. Toutes les clés d'état qui seront utilisées ou mises à jour par les nœuds suivantes doivent être déclarées et initialisées ici. 
+
+* **Entrées :** Reçoit les données initiales qui déclenchent le workflow, qui seront soit un message de chat, soit les données soumises via un formulaire.
+* **Sorties :** Fournit une seule ancre de sortie pour se connecter au premier nœud opérationnel, transmettant les données d'entrée initiales et l'état de flux initialisé.
+
    + +*** + +### ** 2. Node LLM ** + +Fournit un accès direct à un modèle de grande langue (LLM) configuré pour exécuter des tâches AI, permettant au workflow d'effectuer une extraction structurée de données si nécessaire. + +* ** Fonctionnalité: ** Ce nœud envoie des demandes à un LLM basé sur des instructions (messages) et un contexte fourni. Il peut être utilisé pour la génération de texte, le résumé, la traduction, l'analyse, la réponse aux questions et la génération de sortie JSON structurée selon un schéma défini. Il a accès à la mémoire pour le thread de conversation et peut lire / écrire`Flow State`. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O d'OpenAI ou Google Gemini. + * ** Messages **: Définissez l'entrée conversationnelle pour le LLM, en la structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la réponse de l'IA. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Mémoire **: Si vous activez, détermine si le LLM doit considérer l'historique du thread de conversation actuel lors de la génération de sa réponse. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté au LLM - par exemple, s'il faut inclure tous les messages, seulement une fenêtre récente de virages ou une version résumée. + * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** Retour Response As **: Configure comment la sortie de LLM est classée - comme un`User Message`ou`Assistant Message`- qui peut influencer la façon dont il est géré par les systèmes de mémoire ou la journalisation ultérieurs. 
+ * ** Sortie structurée JSON **: Demande au LLM de formater sa sortie en fonction d'un schéma JSON spécifique - y compris des clés, des types de données et des descriptions - garantissant des données prévisibles et lisibles par la machine. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud LLM sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise des données du déclencheur initial du workflow ou des sorties des nœuds précédents, incorporant ces données dans le`Messages`ou`Input Message`champs. Il peut également récupérer des valeurs de`$flow.state`Lorsque les variables d'entrée le font référence. +* ** Sorties: ** produit la réponse de LLM, qui sera soit du texte brut, soit un objet JSON structuré. La catégorisation de cette sortie - en tant qu'utilisateur ou assistant - est déterminée par le`Return Response`paramètre. + +
    + +*** + +### ** 3. Node d'agent ** + +Représente une entité d'IA autonome capable de raisonner, de planifier et d'interagir avec des outils ou des sources de connaissances pour atteindre un objectif donné. + +* ** Fonctionnalité: ** Ce nœud utilise un LLM pour décider dynamiquement d'une séquence d'actions. En fonction de l'objectif de l'utilisateur - fourni via des messages / entrées - il peut choisir d'utiliser des outils disponibles ou des magasins de documents de requête pour recueillir des informations ou effectuer des actions. Il gère son propre cycle de raisonnement et peut utiliser la mémoire pour le fil de conversation et`Flow State`. Convient aux tâches nécessitant un raisonnement en plusieurs étapes ou interagissant dynamiquement avec des systèmes ou des outils externes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O ou Google Gemini d'OpenAI - qui conduira les processus de raisonnement et de prise de décision de l'agent. + * ** Messages **: Définissez l'entrée conversationnelle initiale, l'objectif ou le contexte pour l'agent, en le structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la compréhension de l'agent et les actions ultérieures. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Outils **: Spécifiez quels outils fluide prédéfinis l'agent est autorisé à utiliser pour atteindre ses objectifs. + * Pour chaque outil sélectionné, un ** facultatif ** nécessite un indicateur d'entrée humain ** indique si l'opération de l'outil peut elle-même s'arrêter pour demander une intervention humaine. + * ** Magasins de connaissances / documents **: Configurer l'accès aux informations dans les magasins de documents gérés par flux. + * ** Magasin de documents **: Choisissez une boutique de documents préconfigurée à partir de laquelle l'agent peut récupérer des informations. 
Ces magasins doivent être mis en place et peuplés à l'avance. + * ** Décrire les connaissances **: Fournir une description du langage naturel du contenu et du but de ce magasin de documents. Cette description guide l'agent pour comprendre quel type d'informations le magasin contient et quand il serait approprié de les interroger. + * ** Connaissances / Vector intégrés **: Configurez l'accès aux magasins de vecteurs externes et préexistants comme sources de connaissances supplémentaires pour l'agent. + * ** Vector Store **: Sélectionne la base de données vectorielle spécifique et préconfigurée que l'agent peut interroger. + * ** Modèle d'intégration **: Spécifie le modèle d'intégration associé au magasin vectoriel sélectionné, assurant la compatibilité des requêtes. + * ** Nom de la connaissance **: attribue un court nom descriptif à cette source de connaissances basée sur un vecteur, que l'agent peut utiliser pour référence. + * ** Décrire les connaissances **: Fournir une description du langage naturel du contenu et du but de ce magasin vectoriel, en guidant l'agent sur quand et comment utiliser cette source de connaissances spécifique. + * ** RETOUR DOCUMENTS SOURCES **: Si vous êtes activé, demande à l'agent d'inclure des informations sur les documents source avec les données récupérées du magasin vectoriel. + * ** Mémoire **: Si vous êtes activé, détermine si l'agent doit considérer l'historique du thread de conversation actuel lors de la prise de décisions et de la génération de réponses. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté à l'agent - par exemple, que ce soit pour inclure tous les messages, seulement une fenêtre récente ou une version résumée. 
+ * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** RETOUR RÉPONSE **: Configure comment la sortie ou le message final de l'agent est classé - en tant que message utilisateur ou message assistant - qui peut influencer la façon dont il est géré par des systèmes de mémoire ultérieurs ou la journalisation. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud d'agent sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise les données du déclencheur initial du workflow ou des sorties des nœuds précédents, souvent incorporés dans le`Messages`ou`Input Message`champs. Il accède aux outils configurés et aux sources de connaissances selon les besoins. +* ** Sorties: ** produit le résultat ou la réponse finale générée par l'agent une fois qu'il a terminé son raisonnement, sa planification et toute interaction avec des outils ou des sources de connaissances. + +
    + +*** + +### ** 4. Node d'outil ** + +Fournit un mécanisme pour exécuter directement et de manière déterministe un outil fluide spécifique et prédéfini dans la séquence de workflow. Contrairement au nœud d'agent, où le LLM choisit dynamiquement un outil basé sur le raisonnement, le nœud d'outil exécute exactement l'outil sélectionné par le concepteur de workflow pendant la configuration. + +* ** Fonctionnalité: ** Ce nœud est utilisé lorsque le workflow nécessite l'exécution d'une capacité spécifique connue à un point défini, avec des entrées facilement disponibles. Il garantit une action déterministe sans impliquer le raisonnement LLM pour la sélection des outils. +* ** Comment ça marche ** + 1. ** TRANGERS: ** Lorsque l'exécution du workflow atteint un nœud d'outil, il s'active. + 2. ** Identification de l'outil: ** Il identifie l'outil de flux spécifique sélectionné dans sa configuration. + 3. ** Résolution de l'argument d'entrée: ** Il examine la configuration des arguments d'entrée de l'outil. Pour chaque paramètre d'entrée requis de l'outil sélectionné. + 4. ** Exécution: ** Il invoque le code sous-jacent ou l'appel API associé à l'outil Flowise sélectionné, passant les arguments d'entrée résolus. + 5. ** Génération de sortie: ** Il reçoit le résultat renvoyé par l'exécution de l'outil. + 6. ** Propagation de sortie: ** Il rend ce résultat disponible via son ancre de sortie pour les nœuds suivants. +* ** Paramètres de configuration ** + * ** Sélection d'outils **: Choisissez l'outil Flowise spécifique et enregistré que ce nœud exécutera à partir d'une liste déroulante. + * ** Arguments d'entrée **: Définissez comment les données de votre flux de travail sont fournies à l'outil sélectionné. 
Cette section s'adapte dynamiquement en fonction de l'outil choisi, présentant ses paramètres d'entrée spécifiques: + * ** Nom de l'argument de la carte **: Pour chaque entrée, l'outil sélectionné nécessite (par exemple,`input`Pour une calculatrice), ce champ affichera le nom du paramètre attendu tel que défini par l'outil lui-même. + * ** Fournir une valeur d'argument **: Définissez la valeur de ce paramètre correspondant, en utilisant une variable dynamique comme`{{ previousNode.output }}`, `{{ $flow.state.someKey }}`, ou en entrant un texte statique. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de cet outil sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** reçoit les données nécessaires pour les arguments de l'outil via le`Input Arguments`mappage, valeurs d'approvisionnement à partir des sorties de nœud précédentes,`$flow.state`ou configurations statiques. +* ** Sorties: ** produit la sortie brute générée par l'outil exécuté - par exemple, une chaîne JSON à partir d'une API, un résultat de texte ou une valeur numérique. + +
    + +*** + +### ** 5. Retriever Node ** + +Effectue une récupération d'informations ciblée à partir des magasins de documents configurés. + +* ** Fonctionnalité: ** Ce nœud interroge un ou plusieurs magasins de documents spécifiés, récupérant des morceaux de document pertinents basés sur la similitude sémantique. C'est une alternative ciblée à l'utilisation d'un nœud d'agent lorsque la seule action requise est la récupération et la sélection des outils dynamiques par un LLM n'est pas nécessaire. +* ** Paramètres de configuration ** + * ** Magasins de connaissances / documents **: Spécifiez quel (s) magasin de documents préconfigurés et peuplés, ce nœud doit interroger pour trouver des informations pertinentes. + * ** Retriever Query **: Définissez la requête texte qui sera utilisée pour rechercher les magasins de documents sélectionnés. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Format de sortie **: Choisissez comment les informations récupérées doivent être présentées - soit comme simple`Text`ou comme`Text with Metadata`, qui peut inclure des détails tels que les noms de documents source ou les emplacements. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud Retriever sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** nécessite une chaîne de requête - souvent fournie comme une variable à partir d'une étape précédente ou d'une entrée utilisateur - et accède aux magasins de documents sélectionnés pour plus d'informations. +* ** sorties: ** produit les morceaux de document récupérés de la base de connaissances, formaté selon les choisis`Output Format`. + +
    + +*** + +### 6. nœud http + +Facilite la communication directe avec les services Web externes et les API via le protocole de transfert hypertexte (HTTP). + +* ** Fonctionnalité: ** Ce nœud permet au workflow d'interagir avec tout système externe accessible via HTTP. Il peut envoyer différents types de demandes (obtenir, publier, mettre, supprimer, patcher) à une URL spécifiée, permettant une intégration avec des API tierces, récupérer des données à partir de ressources Web ou déclencher des webhooks externes. Le nœud prend en charge la configuration des méthodes d'authentification, des en-têtes personnalisés, des paramètres de requête et différents types de corps de demande pour répondre aux diverses exigences d'API. +* ** Paramètres de configuration ** + * ** HTTP Idedential **: Sélectionnez éventuellement des informations d'identification préconfigurées - telles que l'authentification de base, le jeton de support ou la clé API - pour authentifier les demandes au service cible. + * ** Méthode de la demande **: Spécifiez la méthode HTTP à utiliser pour la demande - par exemple,`GET`, `POST`, `PUT`, `DELETE`, `PATCH`. + * ** URL cible **: Définissez l'URL complète du point de terminaison externe auquel la demande sera envoyée. + * ** En-têtes de demande **: Définissez tous les en-têtes HTTP nécessaires en paires de valeurs clés à inclure dans la demande. + * ** Paramètres de requête URL **: Définissez les paires de valeurs clés qui seront annexées à l'URL en tant que paramètres de requête. + * ** Type de corps de demande **: Choisissez le format de la charge utile de demande si l'envoi de données - les options incluent`JSON`, `Raw text`, `Form Data`, ou`x-www-form-urlencoded`. + * ** Demande Body **: Fournissez la charge utile de données réelle pour des méthodes comme le poste ou le put. Le format doit correspondre au sélectionné`Body Type`et les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. 
+ * ** Type de réponse **: Spécifiez comment le flux de travail doit interpréter la réponse reçue du serveur - les options incluent`JSON`, `Text`, `Array Buffer`, ou`Base64`pour les données binaires. +* ** Entrées: ** reçoit des données de configuration telles que l'URL, la méthode, les en-têtes et le corps, incorporant souvent des valeurs dynamiques à partir d'étapes de work`$flow.state`. +* ** Sorties: ** produit la réponse reçue du serveur externe, analysé selon le sélectionné`Response Type`. + +
    + +*** + +### ** 7. Node de condition ** + +Implémente la logique de ramification déterministe dans le flux de travail sur la base des règles définies. + +* ** Fonctionnalité: ** Ce nœud agit comme un point de décision, évaluant une ou plusieurs conditions spécifiées pour diriger le flux de travail dans différents chemins. Il compare les valeurs d'entrée - qui peuvent être des chaînes, des nombres ou des booléens - en utilisant une variété d'opérateurs logiques, tels que les égaux, contient, supérieur ou vide. Sur la base de la question de savoir si ces conditions évaluent en vrai ou fausse, l'exécution du flux de travail passe le long de l'une des branches de sortie distinctes connectées à ce nœud. +* ** Paramètres de configuration ** + * ** Conditions **: Configurez l'ensemble des règles logiques que le nœud évaluera. + * ** Type **: Spécifiez le type de données comparées pour cette règle -`String`, `Number`, ou`Boolean`. + * ** Valeur 1 **: Définissez la première valeur pour la comparaison. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Opération **: Sélectionnez l'opérateur logique à appliquer entre la valeur 1 et la valeur 2 - par exemple,`equal`, `notEqual`, `contains`, `larger`, `isEmpty`. + * ** Valeur 2 **: Définissez la deuxième valeur pour la comparaison, si nécessaire par l'opération choisie. Les données dynamiques peuvent également être insérées ici en utilisant`{{ variables }}`. +* ** Entrées: ** nécessite les données pour`Value 1`et`Value 2`pour chaque condition évaluée. Ces valeurs sont fournies à partir des sorties de nœud précédentes ou récupérées à partir de`$flow.state`. +* ** Sorties: ** fournit plusieurs ancres de sortie, correspondant au résultat booléen (vrai / faux) des conditions évaluées. Le flux de travail continue le long du chemin spécifique connecté à l'ancre de sortie qui correspond au résultat. + +
    + +*** + +### ** 8. Node d'agent de condition ** + +Fournit une ramification dynamique basée sur l'IA basée sur les instructions et le contexte du langage naturel. + +* ** Fonctionnalité: ** Ce nœud utilise un modèle grand langage (LLM) pour acheminer le workflow. Les analyses des analyses ont fourni des données d'entrée par rapport à un ensemble de "scénarios" définis par l'utilisateur - résultats ou catégories potentiels - guidés par des "instructions" de langage naturel de haut niveau qui définissent la tâche de prise de décision. Le LLM détermine ensuite quel scénario correspond le mieux au contexte d'entrée actuel. Sur la base de cette classification dirigée par l'IA, l'exécution du flux de travail réduit le chemin de sortie spécifique correspondant au scénario choisi. Ce nœud est particulièrement utile pour les tâches telles que la reconnaissance de l'intention des utilisateurs, le routage conditionnel complexe ou la prise de décision situationnelle nuancée où des règles simples et prédéfinies - comme dans le nœud de condition - sont insuffisantes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi qui effectuera l'analyse et la classification des scénarios. + * ** Instructions **: Définissez l'objectif ou la tâche globale du LLM en langage naturel - par exemple, "Déterminez si la demande de l'utilisateur concerne les ventes, le support ou la demande générale." + * ** Entrée **: spécifiez les données, souvent du texte à partir d'une étape précédente ou d'une entrée utilisateur, en utilisant`{{ variables }}`, que le LLM analysera pour prendre sa décision de routage. + * ** Scénarios **: Configurer un tableau définissant les résultats possibles ou les chemins distincts que le flux de travail peut prendre. Chaque scénario est décrit dans le langage naturel - par exemple, «Enquête sur les ventes», «demande de support», «question générale» - et chacune correspond à une ancre de sortie unique sur le nœud. 
+* ** Entrées: ** nécessite le`Input`Données pour l'analyse et le`Instructions`Pour guider le LLM. +* ** sorties: ** fournit plusieurs ancres de sortie, une pour chaque définie`Scenario`. Le workflow continue le long du chemin spécifique connecté à l'ancre de sortie que le LLM détermine le meilleur correspond à l'entrée. + +
    + +*** + +### ** 9. Node d'itération ** + +Exécute un "sous-flux" défini - une séquence de nœuds imbriqués - pour chaque élément d'un tableau d'entrée, implémentant une boucle "for-een". + +* ** Fonctionnalité: ** Ce nœud est conçu pour le traitement des collections de données. Il prend un tableau, fourni directement ou référencé via une variable, comme entrée. Pour chaque élément individuel à l'intérieur de ce tableau, le nœud d'itération exécute séquentiellement la séquence d'autres nœuds qui sont visuellement placés à l'intérieur de ses limites sur la toile. +* ** Paramètres de configuration ** + * ** Entrée du tableau **: Spécifie le tableau d'entrée que le nœud iratera. Ceci est fourni en faisant référence à une variable qui contient un tableau à partir de la sortie d'un nœud précédent ou du`$flow.state`- par exemple,`{{ $flow.state.itemList }}`. +* ** Entrées: ** nécessite un tableau à fournir à son`Array Input`paramètre. +* ** Sorties: ** Fournit une seule anage de sortie qui ne devient active qu'après que le sous-flux imbriqué a terminé l'exécution pour tous les éléments du tableau d'entrée. Les données transmises par cette sortie peuvent inclure des résultats agrégés ou l'état final des variables modifié dans la boucle, selon la conception du sous-flux. Les nœuds placés à l'intérieur du bloc d'itération ont leurs propres connexions d'entrée et de sortie distinctes qui définissent la séquence d'opérations pour chaque élément. + +
    + +*** + +### ** 10. Node de boucle ** + +Redirige explicitement l'exécution du workflow vers un nœud précédemment exécuté. + +* ** Fonctionnalité: ** Ce nœud permet la création de cycles ou de tentatives itératives dans un workflow. Lorsque le flux d'exécution atteint le nœud de boucle, il n'atteint pas un nouveau nœud; Au lieu de cela, il "remonte" à un nœud cible spécifié qui a déjà été exécuté plus tôt dans l'exécution actuelle du flux de travail. Cette action provoque la réexécution de ce nœud cible et de tous les nœuds suivants dans cette partie de l'écoulement. +* ** Paramètres de configuration ** + * ** Loop Retour à **: Sélectionne l'ID unique d'un nœud précédemment exécuté dans le flux de travail actuel auquel l'exécution doit retourner. + * ** MAX LOOP COUNT **: Définit le nombre maximal de fois que cette opération de boucle peut être effectuée dans une seule exécution de workflow, sauvegarde contre les cycles infinis. La valeur par défaut est 5. +* ** Entrées: ** Reçoit le signal d'exécution pour activer. Il suit en interne le nombre de fois que la boucle s'est produite pour l'exécution actuelle. +* ** Sorties: ** Ce nœud n'a pas d'ancre de sortie standard à pointant, car sa fonction principale est de rediriger le flux d'exécution vers l'arrière vers le`Loop Back To`Node cible, d'où le flux de travail continue alors. + +
    + +*** + +### ** 11. Node d'entrée humain ** + +Utilise l'exécution du workflow pour demander des entrées, une approbation ou des commentaires explicites d'un utilisateur humain - un composant clé pour les processus humains dans la boucle (HITL). + +* ** Fonctionnalité: ** Ce nœud arrête la progression automatisée du flux de travail et présente des informations ou une question à un utilisateur humain, via l'interface de chat. Le contenu affiché à l'utilisateur peut être un texte statique prédéfini ou généré dynamiquement par un LLM basé sur le contexte de workflow actuel. L'utilisateur reçoit des choix d'action distincts - par exemple, «procéder», «rejeter» - et, s'il est activé, un champ pour fournir des commentaires textuels. Une fois que l'utilisateur fait une sélection et soumet sa réponse, le flux de travail reprend l'exécution le long du chemin de sortie spécifique correspondant à son action choisie. +* ** Paramètres de configuration ** + * ** Type de description **: détermine comment le message ou la question présentée à l'utilisateur est généré - soit`Fixed`(texte statique) ou`Dynamic`(généré par un LLM). + * ** Si le type de description est`Fixed`** + * ** Description **: Ce champ contient le texte exact à afficher à l'utilisateur. Il prend en charge l'insertion de données dynamiques en utilisant`{{ variables }}` + * **Si`Description Type`est`Dynamic`** + * ** Modèle **: sélectionne le modèle AI dans un service choisi qui générera le message orienté utilisateur. + * ** invite **: fournit les instructions ou l'invite pour le LLM sélectionné pour générer le message affiché à l'utilisateur. + * ** Feedback: ** Si activé, l'utilisateur sera invité avec une fenêtre de rétroaction pour laisser ses commentaires, et ces commentaires seront annexés à la sortie du nœud. +* ** Entrées: ** Reçoit le signal d'exécution pour suspendre le workflow. 
Il peut utiliser les données des étapes précédentes ou`$flow.state`à travers des variables dans le`Description`ou`Prompt`champs s'ils sont configurés pour le contenu dynamique. +* ** Sorties: ** Fournit deux ancres de sortie, chacune correspondant à une action utilisateur distincte - une ancre pour "procéder" et une autre pour "rejeter". Le flux de travail continue le long du chemin connecté à l'ancre correspondant à la sélection de l'utilisateur. + +
    + +*** + +### ** 12. Node de réponse directe ** + +Envoie un message final à l'utilisateur et termine le chemin d'exécution actuel. + +* ** Fonctionnalité: ** Ce nœud sert de point de terminaison pour une branche spécifique ou l'intégralité d'un workflow. Il prend un message configuré - qui peut être du texte statique ou du contenu dynamique d'une variable - et le livre directement à l'utilisateur final via l'interface de chat. Lors de l'envoi de ce message, l'exécution le long de ce chemin particulier du workflow conclut; Aucun autre nœud connecté à partir de ce point ne sera traité. +* ** Paramètres de configuration ** + * ** Message **: Définissez le texte ou la variable`{{ variable }}`Cela contient le contenu à envoyer comme réponse finale à l'utilisateur. +* ** Entrées: ** reçoit le contenu du message, qui provient de la sortie d'un nœud précédent ou d'une valeur stockée dans`$flow.state`. +* ** Sorties: ** Ce nœud n'a pas d'ancres de sortie, car sa fonction est de terminer le chemin d'exécution après avoir envoyé la réponse. + +
    + +*** + +### ** 13. Node de fonction personnalisé ** + +Fournit un mécanisme pour exécuter le code JavaScript côté serveur personnalisé dans le workflow. + +* ** Fonctionnalité: ** Ce nœud permet d'écrire et d'exécuter des extraits arbitraires JavaScript, offrant un moyen efficace d'implémenter des transformations de données complexes, une logique métier sur mesure ou des interactions avec des ressources non directement prises en charge par d'autres nœuds standard. Le code exécuté fonctionne dans un environnement Node.js et a des moyens spécifiques d'accéder aux données: + * ** Variables d'entrée: ** Valeurs passées via le`Input Variables`La configuration est accessible dans la fonction, généralement préfixée avec`--- +description: Learn how to build multi-agents system using Agentflow V2, written by @toi500 +--- + +# Agentflow v2 + +Ce guide explore l'architecture AgentFlow V2, détaillant ses concepts principaux, ses cas d'utilisation, son état de flux et ses références de nœuds complets. + +{% hint style = "avertissement"%} +** Avis de non-responsabilité: ** Cette documentation décrit AgentFlow V2 à sa version officielle actuelle. Les fonctionnalités, les fonctionnalités et les paramètres de nœud sont soumis à un changement dans les futures mises à jour et versions de Flowise. Veuillez vous référer aux dernières notes de publication officielle ou à des informations sur l'application pour les détails les plus récents. +{% EndHint%} + +{% embed url = "https://youtu.be/-h4wquzrhhi?si=jkhuefiw06ao6ge"%} + +## Concept de base + +AgentFlow V2 représente une évolution architecturale significative, introduisant un nouveau paradigme en flux qui se concentre sur une orchestration explicite du flux de travail et une flexibilité accrue. 
Contrairement à la dépendance principale de V1 sur les cadres externes pour sa logique de graphique d'agent de base, V2 déplace l'attention de la conception de l'ensemble du flux de travail en utilisant un ensemble granulaire de nœuds autonomes spécialisés développés nativement en tant que composants coulissants principaux. + +Dans cette architecture V2, chaque nœud fonctionne comme une unité indépendante, exécutant une opération discrète en fonction de sa conception et de sa configuration spécifiques. Les connexions visuelles entre les nœuds de la canevas définissent explicitement le chemin de travail et la séquence de contrôle du workflow, les données peuvent être transmises entre les nœuds en faisant référence aux sorties de tout nœud précédemment exécuté dans le flux actuel, et l'état de flux fournit un mécanisme explicite pour gérer et partager des données tout au long du flux de travail. + +L'architecture V2 met en œuvre un système complet de la dépendance aux nœuds et de la file d'attente d'exécution qui respecte précisément ces voies définies tout en maintenant une séparation claire entre les composants, permettant aux flux de travail de devenir à la fois plus sophistiqués et plus faciles à concevoir. Cela permet aux modèles complexes comme les boucles, la ramification conditionnelle, les interactions humaines dans la boucle et d'autres à être réalisables. Cela le rend plus adaptable à divers cas d'utilisation tout en restant plus maintenable et extensible. + +
    + +## Différence entre AgentFlow et Plateforme d'automatisation + +L'une des questions les plus posées: quelle est la différence entre AgentFlow et les plates-formes d'automatisation comme N8N, Make ou Zapier? + +### 💬 ** Communication d'agent à agent ** + +La communication multimodale entre les agents est prise en charge. Un agent de superviseur peut formuler et déléguer des tâches à plusieurs agents de travailleurs, avec des sorties des agents des travailleurs retournés par la suite au superviseur. + +À chaque étape, les agents ont accès à l'historique complet de la conversation, permettant au superviseur de déterminer la tâche suivante et les agents des travailleurs pour interpréter la tâche, sélectionner les outils appropriés et exécuter les actions en conséquence. + +Cette architecture permet ** la collaboration, la délégation et la gestion des tâches partagées ** sur plusieurs agents, ces capacités ne sont généralement pas offertes par les outils d'automatisation traditionnels. + +
    + +### 🙋‍ Human dans la boucle + +L'exécution est interrompue en attendant l'entrée humaine, sans bloquer le thread en cours d'exécution. Chaque point de contrôle est enregistré, permettant au flux de travail de reprendre à partir du même point même après un redémarrage de l'application. + +L'utilisation de points de contrôle permet ** les agents de longue durée et avec état **. + +Les agents peuvent également être configurés pour ** demander l'autorisation avant d'exécuter des outils **, similaire à la façon dont Claude demande l'approbation de l'utilisateur avant d'utiliser les outils MCP. Cela permet d'empêcher l'exécution autonome d'actions sensibles sans l'approbation explicite de l'utilisateur. + +
    + +### 📖 État partagé + +L'état partagé permet l'échange de données entre les agents, particulièrement utile pour passer des données entre les branches ou les étapes non adjacentes d'un flux. Se référer à[#understanding-flow-state](agentflowv2.md#understanding-flow-state "mention") + +### ⚡ Streaming + +Prend en charge les événements de serveur (SSE) pour le streaming en temps réel de réponses LLM ou d'agent. Le streaming permet également aux mises à jour de l'abonnement à l'exécution au fur et à mesure que le flux de travail progresse. + +
    + +### 🌐 outils MCP + +Alors que les plates-formes d'automatisation traditionnelles présentent souvent de vastes bibliothèques d'intégrations prédéfinies, AgentFlow permet à MCP ([Model Context Protocol](https://github.com/modelcontextprotocol)) outils à connecter dans le cadre du flux de travail, plutôt que de fonctionner uniquement en tant qu'outils d'agent. + +Les MCP personnalisés peuvent également être créés indépendamment, sans dépendre des intégrations fournies par la plate-forme. MCP est largement considéré comme une norme de l'industrie et est généralement soutenu et maintenu par les prestataires officiels. Par exemple, le GitHub MCP est développé et maintenu par l'équipe GitHub, avec un soutien similaire fourni pour Atlassian Jira, Brave Search, et autres. + +
    + +## Référence du nœud AgentFlow V2 + +Cette section fournit une référence détaillée pour chaque nœud disponible, décrivant son objectif spécifique, les paramètres de configuration des clés, les entrées attendues, les sorties générées et son rôle dans l'architecture AgentFlow V2. + + + +*** + +### ** 1. Démarrer le nœud ** + +Le point d'entrée désigné pour lancer n'importe quelle exécution de workflow AgentFlow V2. Chaque flux doit commencer par ce nœud. + +* ** Fonctionnalité: ** Définit comment le flux de travail est déclenché et configure les conditions initiales. Il peut accepter les entrées directement à partir de l'interface de chat ou via un formulaire personnalisable présenté à l'utilisateur. Il permet également l'initialisation de`Flow State`Variables au début de l'exécution et peut gérer la façon dont la mémoire de conversation est gérée pour l'exécution. +* ** Paramètres de configuration ** + * ** Type d'entrée **: détermine comment l'exécution du flux de travail est initiée, soit par`Chat Input`de l'utilisateur ou via un soumis`Form Input`. + * ** Titre du formulaire, description du formulaire, types d'entrée de formulaire **: If`Form Input`est sélectionné, ces champs configurent l'apparence du formulaire présenté à l'utilisateur, permettant divers types de champs de saisie avec des étiquettes définies et des noms de variables. + * ** Mémoire éphémère **: si elle est activée, demande au workflow de commencer l'exécution sans considérer les messages passés du thread de conversation, en commençant efficacement par une ardoise de mémoire propre. + * ** État de flux **: définit l'ensemble complet des paires de valeurs clés initiales pour l'état d'exécution du workflow`$flow.state`. Toutes les clés d'état qui seront utilisées ou mises à jour par les nœuds suivantes doivent être déclarées et initialisées ici. 
+* ** Entrées: ** Reçoit les données initiales qui déclenchent le workflow, qui sera soit un message de chat, soit les données soumises via un formulaire. +* ** Sorties: ** Fournit une seule ancre de sortie pour se connecter au premier nœud opérationnel, passant les données d'entrée initiales et l'état de flux initialisé. + +
    + +*** + +### ** 2. Node LLM ** + +Fournit un accès direct à un modèle de grande langue (LLM) configuré pour exécuter des tâches AI, permettant au workflow d'effectuer une extraction structurée de données si nécessaire. + +* ** Fonctionnalité: ** Ce nœud envoie des demandes à un LLM basé sur des instructions (messages) et un contexte fourni. Il peut être utilisé pour la génération de texte, le résumé, la traduction, l'analyse, la réponse aux questions et la génération de sortie JSON structurée selon un schéma défini. Il a accès à la mémoire pour le thread de conversation et peut lire / écrire`Flow State`. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O d'OpenAI ou Google Gemini. + * ** Messages **: Définissez l'entrée conversationnelle pour le LLM, en la structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la réponse de l'IA. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Mémoire **: Si vous activez, détermine si le LLM doit considérer l'historique du thread de conversation actuel lors de la génération de sa réponse. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté au LLM - par exemple, s'il faut inclure tous les messages, seulement une fenêtre récente de virages ou une version résumée. + * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** Retour Response As **: Configure comment la sortie de LLM est classée - comme un`User Message`ou`Assistant Message`- qui peut influencer la façon dont il est géré par les systèmes de mémoire ou la journalisation ultérieurs. 
+ * ** Sortie structurée JSON **: Demande au LLM de formater sa sortie en fonction d'un schéma JSON spécifique - y compris des clés, des types de données et des descriptions - garantissant des données prévisibles et lisibles par la machine. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud LLM sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise des données du déclencheur initial du workflow ou des sorties des nœuds précédents, incorporant ces données dans le`Messages`ou`Input Message`champs. Il peut également récupérer des valeurs de`$flow.state`Lorsque les variables d'entrée le font référence. +* ** Sorties: ** produit la réponse de LLM, qui sera soit du texte brut, soit un objet JSON structuré. La catégorisation de cette sortie - en tant qu'utilisateur ou assistant - est déterminée par le`Return Response`paramètre. + +
    + +*** + +### ** 3. Node d'agent ** + +Représente une entité d'IA autonome capable de raisonner, de planifier et d'interagir avec des outils ou des sources de connaissances pour atteindre un objectif donné. + +* ** Fonctionnalité: ** Ce nœud utilise un LLM pour décider dynamiquement d'une séquence d'actions. En fonction de l'objectif de l'utilisateur - fourni via des messages / entrées - il peut choisir d'utiliser des outils disponibles ou d'interroger des magasins de documents pour recueillir des informations ou effectuer des actions. Il gère son propre cycle de raisonnement et peut utiliser la mémoire pour le fil de conversation et`Flow State`. Convient aux tâches nécessitant un raisonnement en plusieurs étapes ou interagissant dynamiquement avec des systèmes ou des outils externes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi - par exemple, GPT-4O ou Google Gemini d'OpenAI - qui conduira les processus de raisonnement et de prise de décision de l'agent. + * ** Messages **: Définissez l'entrée conversationnelle initiale, l'objectif ou le contexte pour l'agent, en le structurant comme une séquence de rôles - système, utilisateur, assistant, développeur - pour guider la compréhension de l'agent et les actions ultérieures. Les données dynamiques peuvent être insérées en utilisant`{{ variable }}`. + * ** Outils **: Spécifiez quels outils Flowise prédéfinis l'agent est autorisé à utiliser pour atteindre ses objectifs. + * Pour chaque outil sélectionné, un indicateur facultatif **Nécessite une entrée humaine** indique si l'opération de l'outil peut elle-même s'interrompre pour demander une intervention humaine. + * ** Magasins de connaissances / documents **: Configurer l'accès aux informations dans les magasins de documents gérés par Flowise. + * ** Magasin de documents **: Choisissez un magasin de documents préconfiguré à partir duquel l'agent peut récupérer des informations. 
Ces magasins doivent être mis en place et peuplés à l'avance. + * ** Décrire les connaissances **: Fournir une description du langage naturel du contenu et du but de ce magasin de documents. Cette description guide l'agent pour comprendre quel type d'informations le magasin contient et quand il serait approprié de les interroger. + * ** Connaissances / Vector intégrés **: Configurez l'accès aux magasins de vecteurs externes et préexistants comme sources de connaissances supplémentaires pour l'agent. + * ** Vector Store **: Sélectionne la base de données vectorielle spécifique et préconfigurée que l'agent peut interroger. + * ** Modèle d'intégration **: Spécifie le modèle d'intégration associé au magasin vectoriel sélectionné, assurant la compatibilité des requêtes. + * ** Nom de la connaissance **: attribue un court nom descriptif à cette source de connaissances basée sur un vecteur, que l'agent peut utiliser pour référence. + * ** Décrire les connaissances **: Fournir une description du langage naturel du contenu et du but de ce magasin vectoriel, en guidant l'agent sur quand et comment utiliser cette source de connaissances spécifique. + * ** RETOUR DOCUMENTS SOURCES **: Si vous êtes activé, demande à l'agent d'inclure des informations sur les documents source avec les données récupérées du magasin vectoriel. + * ** Mémoire **: Si vous êtes activé, détermine si l'agent doit considérer l'historique du thread de conversation actuel lors de la prise de décisions et de la génération de réponses. + * ** Type de mémoire, taille de la fenêtre, limite de jeton maximale **: Si la mémoire est utilisée, ces paramètres affinent comment l'historique de la conversation est géré et présenté à l'agent - par exemple, que ce soit pour inclure tous les messages, seulement une fenêtre récente ou une version résumée. 
+ * ** Message d'entrée **: Spécifie la variable ou le texte qui sera annexé comme le message utilisateur le plus récent à la fin du contexte de conversation existant - y compris le contexte initial et la mémoire - avant d'être traités par le LLM / Agent. + * ** RETOUR RÉPONSE **: Configure comment la sortie ou le message final de l'agent est classé - en tant que message utilisateur ou message assistant - qui peut influencer la façon dont il est géré par des systèmes de mémoire ultérieurs ou la journalisation. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud d'agent sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** Ce nœud utilise les données du déclencheur initial du workflow ou des sorties des nœuds précédents, souvent incorporés dans le`Messages`ou`Input Message`champs. Il accède aux outils configurés et aux sources de connaissances selon les besoins. +* ** Sorties: ** produit le résultat ou la réponse finale générée par l'agent une fois qu'il a terminé son raisonnement, sa planification et toute interaction avec des outils ou des sources de connaissances. + +
    + +*** + +### ** 4. Node d'outil ** + +Fournit un mécanisme pour exécuter directement et de manière déterministe un outil fluide spécifique et prédéfini dans la séquence de workflow. Contrairement au nœud d'agent, où le LLM choisit dynamiquement un outil basé sur le raisonnement, le nœud d'outil exécute exactement l'outil sélectionné par le concepteur de workflow pendant la configuration. + +* ** Fonctionnalité: ** Ce nœud est utilisé lorsque le workflow nécessite l'exécution d'une capacité spécifique connue à un point défini, avec des entrées facilement disponibles. Il garantit une action déterministe sans impliquer le raisonnement LLM pour la sélection des outils. +* ** Comment ça marche ** + 1. ** TRANGERS: ** Lorsque l'exécution du workflow atteint un nœud d'outil, il s'active. + 2. ** Identification de l'outil: ** Il identifie l'outil de flux spécifique sélectionné dans sa configuration. + 3. ** Résolution de l'argument d'entrée: ** Il examine la configuration des arguments d'entrée de l'outil. Pour chaque paramètre d'entrée requis de l'outil sélectionné. + 4. ** Exécution: ** Il invoque le code sous-jacent ou l'appel API associé à l'outil Flowise sélectionné, passant les arguments d'entrée résolus. + 5. ** Génération de sortie: ** Il reçoit le résultat renvoyé par l'exécution de l'outil. + 6. ** Propagation de sortie: ** Il rend ce résultat disponible via son ancre de sortie pour les nœuds suivants. +* ** Paramètres de configuration ** + * ** Sélection d'outils **: Choisissez l'outil Flowise spécifique et enregistré que ce nœud exécutera à partir d'une liste déroulante. + * ** Arguments d'entrée **: Définissez comment les données de votre flux de travail sont fournies à l'outil sélectionné. 
Cette section s'adapte dynamiquement en fonction de l'outil choisi, présentant ses paramètres d'entrée spécifiques: + * ** Nom de l'argument de la carte **: Pour chaque entrée, l'outil sélectionné nécessite (par exemple,`input`Pour une calculatrice), ce champ affichera le nom du paramètre attendu tel que défini par l'outil lui-même. + * ** Fournir une valeur d'argument **: Définissez la valeur de ce paramètre correspondant, en utilisant une variable dynamique comme`{{ previousNode.output }}`, `{{ $flow.state.someKey }}`, ou en entrant un texte statique. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de cet outil sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** reçoit les données nécessaires pour les arguments de l'outil via le`Input Arguments`mappage, valeurs d'approvisionnement à partir des sorties de nœud précédentes,`$flow.state`ou configurations statiques. +* ** Sorties: ** produit la sortie brute générée par l'outil exécuté - par exemple, une chaîne JSON à partir d'une API, un résultat de texte ou une valeur numérique. + +
    + +*** + +### ** 5. Retriever Node ** + +Effectue une récupération d'informations ciblée à partir des magasins de documents configurés. + +* ** Fonctionnalité: ** Ce nœud interroge un ou plusieurs magasins de documents spécifiés, récupérant des morceaux de document pertinents basés sur la similitude sémantique. C'est une alternative ciblée à l'utilisation d'un nœud d'agent lorsque la seule action requise est la récupération et la sélection des outils dynamiques par un LLM n'est pas nécessaire. +* ** Paramètres de configuration ** + * ** Magasins de connaissances / documents **: Spécifiez quel (s) magasin de documents préconfigurés et peuplés, ce nœud doit interroger pour trouver des informations pertinentes. + * ** Retriever Query **: Définissez la requête texte qui sera utilisée pour rechercher les magasins de documents sélectionnés. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Format de sortie **: Choisissez comment les informations récupérées doivent être présentées - soit comme simple`Text`ou comme`Text with Metadata`, qui peut inclure des détails tels que les noms de documents source ou les emplacements. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de ce nœud Retriever sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** nécessite une chaîne de requête - souvent fournie comme une variable à partir d'une étape précédente ou d'une entrée utilisateur - et accède aux magasins de documents sélectionnés pour plus d'informations. +* ** sorties: ** produit les morceaux de document récupérés de la base de connaissances, formaté selon les choisis`Output Format`. + +
    + +*** + +### 6. nœud http + +Facilite la communication directe avec les services Web externes et les API via le protocole de transfert hypertexte (HTTP). + +* ** Fonctionnalité: ** Ce nœud permet au workflow d'interagir avec tout système externe accessible via HTTP. Il peut envoyer différents types de demandes (obtenir, publier, mettre, supprimer, patcher) à une URL spécifiée, permettant une intégration avec des API tierces, récupérer des données à partir de ressources Web ou déclencher des webhooks externes. Le nœud prend en charge la configuration des méthodes d'authentification, des en-têtes personnalisés, des paramètres de requête et différents types de corps de demande pour répondre aux diverses exigences d'API. +* ** Paramètres de configuration ** + * ** HTTP Idedential **: Sélectionnez éventuellement des informations d'identification préconfigurées - telles que l'authentification de base, le jeton de support ou la clé API - pour authentifier les demandes au service cible. + * ** Méthode de la demande **: Spécifiez la méthode HTTP à utiliser pour la demande - par exemple,`GET`, `POST`, `PUT`, `DELETE`, `PATCH`. + * ** URL cible **: Définissez l'URL complète du point de terminaison externe auquel la demande sera envoyée. + * ** En-têtes de demande **: Définissez tous les en-têtes HTTP nécessaires en paires de valeurs clés à inclure dans la demande. + * ** Paramètres de requête URL **: Définissez les paires de valeurs clés qui seront annexées à l'URL en tant que paramètres de requête. + * ** Type de corps de demande **: Choisissez le format de la charge utile de demande si l'envoi de données - les options incluent`JSON`, `Raw text`, `Form Data`, ou`x-www-form-urlencoded`. + * ** Demande Body **: Fournissez la charge utile de données réelle pour des méthodes comme le poste ou le put. Le format doit correspondre au sélectionné`Body Type`et les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. 
+ * ** Type de réponse **: Spécifiez comment le flux de travail doit interpréter la réponse reçue du serveur - les options incluent`JSON`, `Text`, `Array Buffer`, ou`Base64`pour les données binaires. +* ** Entrées: ** reçoit des données de configuration telles que l'URL, la méthode, les en-têtes et le corps, incorporant souvent des valeurs dynamiques à partir d'étapes de work`$flow.state`. +* ** Sorties: ** produit la réponse reçue du serveur externe, analysé selon le sélectionné`Response Type`. + +
    + +*** + +### ** 7. Node de condition ** + +Implémente la logique de ramification déterministe dans le flux de travail sur la base des règles définies. + +* ** Fonctionnalité: ** Ce nœud agit comme un point de décision, évaluant une ou plusieurs conditions spécifiées pour diriger le flux de travail dans différents chemins. Il compare les valeurs d'entrée - qui peuvent être des chaînes, des nombres ou des booléens - en utilisant une variété d'opérateurs logiques, tels que les égaux, contient, supérieur ou vide. Sur la base de la question de savoir si ces conditions évaluent en vrai ou fausse, l'exécution du flux de travail passe le long de l'une des branches de sortie distinctes connectées à ce nœud. +* ** Paramètres de configuration ** + * ** Conditions **: Configurez l'ensemble des règles logiques que le nœud évaluera. + * ** Type **: Spécifiez le type de données comparées pour cette règle -`String`, `Number`, ou`Boolean`. + * ** Valeur 1 **: Définissez la première valeur pour la comparaison. Les données dynamiques peuvent être insérées en utilisant`{{ variables }}`. + * ** Opération **: Sélectionnez l'opérateur logique à appliquer entre la valeur 1 et la valeur 2 - par exemple,`equal`, `notEqual`, `contains`, `larger`, `isEmpty`. + * ** Valeur 2 **: Définissez la deuxième valeur pour la comparaison, si nécessaire par l'opération choisie. Les données dynamiques peuvent également être insérées ici en utilisant`{{ variables }}`. +* ** Entrées: ** nécessite les données pour`Value 1`et`Value 2`pour chaque condition évaluée. Ces valeurs sont fournies à partir des sorties de nœud précédentes ou récupérées à partir de`$flow.state`. +* ** Sorties: ** fournit plusieurs ancres de sortie, correspondant au résultat booléen (vrai / faux) des conditions évaluées. Le flux de travail continue le long du chemin spécifique connecté à l'ancre de sortie qui correspond au résultat. + +
    + +*** + +### ** 8. Node d'agent de condition ** + +Fournit une ramification dynamique basée sur l'IA, guidée par des instructions en langage naturel et par le contexte. + +* ** Fonctionnalité: ** Ce nœud utilise un grand modèle de langage (LLM) pour acheminer le workflow. Il analyse les données d'entrée fournies par rapport à un ensemble de "scénarios" définis par l'utilisateur - résultats ou catégories potentiels - guidé par des "instructions" de langage naturel de haut niveau qui définissent la tâche de prise de décision. Le LLM détermine ensuite quel scénario correspond le mieux au contexte d'entrée actuel. Sur la base de cette classification dirigée par l'IA, l'exécution du flux de travail emprunte le chemin de sortie spécifique correspondant au scénario choisi. Ce nœud est particulièrement utile pour les tâches telles que la reconnaissance de l'intention des utilisateurs, le routage conditionnel complexe ou la prise de décision situationnelle nuancée où des règles simples et prédéfinies - comme dans le nœud de condition - sont insuffisantes. +* ** Paramètres de configuration ** + * ** Modèle **: Spécifie le modèle AI à partir d'un service choisi qui effectuera l'analyse et la classification des scénarios. + * ** Instructions **: Définissez l'objectif ou la tâche globale du LLM en langage naturel - par exemple, "Déterminez si la demande de l'utilisateur concerne les ventes, le support ou la demande générale." + * ** Entrée **: spécifiez les données, souvent du texte à partir d'une étape précédente ou d'une entrée utilisateur, en utilisant `{{ variables }}`, que le LLM analysera pour prendre sa décision de routage. + * ** Scénarios **: Configurer un tableau définissant les résultats possibles ou les chemins distincts que le flux de travail peut prendre. Chaque scénario est décrit en langage naturel - par exemple, «Enquête sur les ventes», «demande de support», «question générale» - et chacun correspond à une ancre de sortie unique sur le nœud. 
+* ** Entrées: ** nécessite le`Input`Données pour l'analyse et le`Instructions`Pour guider le LLM. +* ** sorties: ** fournit plusieurs ancres de sortie, une pour chaque définie`Scenario`. Le workflow continue le long du chemin spécifique connecté à l'ancre de sortie que le LLM détermine le meilleur correspond à l'entrée. + +
    + +*** + +### ** 9. Node d'itération ** + +Exécute un "sous-flux" défini - une séquence de nœuds imbriqués - pour chaque élément d'un tableau d'entrée, implémentant une boucle "for-each". + +* ** Fonctionnalité: ** Ce nœud est conçu pour le traitement des collections de données. Il prend un tableau, fourni directement ou référencé via une variable, comme entrée. Pour chaque élément individuel à l'intérieur de ce tableau, le nœud d'itération exécute séquentiellement la séquence d'autres nœuds qui sont visuellement placés à l'intérieur de ses limites sur la toile. +* ** Paramètres de configuration ** + * ** Entrée du tableau **: Spécifie le tableau d'entrée sur lequel le nœud itérera. Ceci est fourni en faisant référence à une variable qui contient un tableau à partir de la sortie d'un nœud précédent ou du `$flow.state` - par exemple, `{{ $flow.state.itemList }}`. +* ** Entrées: ** nécessite un tableau à fournir à son paramètre `Array Input`. +* ** Sorties: ** Fournit une seule ancre de sortie qui ne devient active qu'après que le sous-flux imbriqué a terminé l'exécution pour tous les éléments du tableau d'entrée. Les données transmises par cette sortie peuvent inclure des résultats agrégés ou l'état final des variables modifiées dans la boucle, selon la conception du sous-flux. Les nœuds placés à l'intérieur du bloc d'itération ont leurs propres connexions d'entrée et de sortie distinctes qui définissent la séquence d'opérations pour chaque élément. + +
    + +*** + +### ** 10. Node de boucle ** + +Redirige explicitement l'exécution du workflow vers un nœud précédemment exécuté. + +* ** Fonctionnalité: ** Ce nœud permet la création de cycles ou de tentatives itératives dans un workflow. Lorsque le flux d'exécution atteint le nœud de boucle, il n'atteint pas un nouveau nœud; Au lieu de cela, il "remonte" à un nœud cible spécifié qui a déjà été exécuté plus tôt dans l'exécution actuelle du flux de travail. Cette action provoque la réexécution de ce nœud cible et de tous les nœuds suivants dans cette partie de l'écoulement. +* ** Paramètres de configuration ** + * ** Loop Retour à **: Sélectionne l'ID unique d'un nœud précédemment exécuté dans le flux de travail actuel auquel l'exécution doit retourner. + * ** MAX LOOP COUNT **: Définit le nombre maximal de fois que cette opération de boucle peut être effectuée dans une seule exécution de workflow, sauvegarde contre les cycles infinis. La valeur par défaut est 5. +* ** Entrées: ** Reçoit le signal d'exécution pour activer. Il suit en interne le nombre de fois que la boucle s'est produite pour l'exécution actuelle. +* ** Sorties: ** Ce nœud n'a pas d'ancre de sortie standard à pointant, car sa fonction principale est de rediriger le flux d'exécution vers l'arrière vers le`Loop Back To`Node cible, d'où le flux de travail continue alors. + +
    + +*** + +### ** 11. Node d'entrée humain ** + +Utilise l'exécution du workflow pour demander des entrées, une approbation ou des commentaires explicites d'un utilisateur humain - un composant clé pour les processus humains dans la boucle (HITL). + +* ** Fonctionnalité: ** Ce nœud arrête la progression automatisée du flux de travail et présente des informations ou une question à un utilisateur humain, via l'interface de chat. Le contenu affiché à l'utilisateur peut être un texte statique prédéfini ou généré dynamiquement par un LLM basé sur le contexte de workflow actuel. L'utilisateur reçoit des choix d'action distincts - par exemple, «procéder», «rejeter» - et, s'il est activé, un champ pour fournir des commentaires textuels. Une fois que l'utilisateur fait une sélection et soumet sa réponse, le flux de travail reprend l'exécution le long du chemin de sortie spécifique correspondant à son action choisie. +* ** Paramètres de configuration ** + * ** Type de description **: détermine comment le message ou la question présentée à l'utilisateur est généré - soit`Fixed`(texte statique) ou`Dynamic`(généré par un LLM). + * ** Si le type de description est`Fixed`** + * ** Description **: Ce champ contient le texte exact à afficher à l'utilisateur. Il prend en charge l'insertion de données dynamiques en utilisant`{{ variables }}` + * **Si`Description Type`est`Dynamic`** + * ** Modèle **: sélectionne le modèle AI dans un service choisi qui générera le message orienté utilisateur. + * ** invite **: fournit les instructions ou l'invite pour le LLM sélectionné pour générer le message affiché à l'utilisateur. + * ** Feedback: ** Si activé, l'utilisateur sera invité avec une fenêtre de rétroaction pour laisser ses commentaires, et ces commentaires seront annexés à la sortie du nœud. +* ** Entrées: ** Reçoit le signal d'exécution pour suspendre le workflow. 
Il peut utiliser les données des étapes précédentes ou`$flow.state`à travers des variables dans le`Description`ou`Prompt`champs s'ils sont configurés pour le contenu dynamique. +* ** Sorties: ** Fournit deux ancres de sortie, chacune correspondant à une action utilisateur distincte - une ancre pour "procéder" et une autre pour "rejeter". Le flux de travail continue le long du chemin connecté à l'ancre correspondant à la sélection de l'utilisateur. + +
    + +*** + +### ** 12. Node de réponse directe ** + +Envoie un message final à l'utilisateur et termine le chemin d'exécution actuel. + +* ** Fonctionnalité: ** Ce nœud sert de point de terminaison pour une branche spécifique ou l'intégralité d'un workflow. Il prend un message configuré - qui peut être du texte statique ou du contenu dynamique d'une variable - et le livre directement à l'utilisateur final via l'interface de chat. Lors de l'envoi de ce message, l'exécution le long de ce chemin particulier du workflow conclut; Aucun autre nœud connecté à partir de ce point ne sera traité. +* ** Paramètres de configuration ** + * ** Message **: Définissez le texte ou la variable`{{ variable }}`Cela contient le contenu à envoyer comme réponse finale à l'utilisateur. +* ** Entrées: ** reçoit le contenu du message, qui provient de la sortie d'un nœud précédent ou d'une valeur stockée dans`$flow.state`. +* ** Sorties: ** Ce nœud n'a pas d'ancres de sortie, car sa fonction est de terminer le chemin d'exécution après avoir envoyé la réponse. + +
    + +*** + +### ** 13. Node de fonction personnalisé ** + +Fournit un mécanisme pour exécuter du code JavaScript personnalisé côté serveur dans le workflow. + +* ** Fonctionnalité: ** Ce nœud permet d'écrire et d'exécuter des extraits JavaScript arbitraires, offrant un moyen efficace d'implémenter des transformations de données complexes, une logique métier sur mesure ou des interactions avec des ressources non directement prises en charge par d'autres nœuds standard. Le code exécuté fonctionne dans un environnement Node.js et a des moyens spécifiques d'accéder aux données: + * ** Variables d'entrée: ** Les valeurs passées via la configuration `Input Variables` sont accessibles dans la fonction, généralement préfixées avec `$` - par exemple, si une variable d'entrée `userid` est définie, elle est accessible comme `$userid`. + * ** Contexte de flux: ** Les variables de configuration de flux par défaut sont disponibles, telles que `$flow.sessionId`, `$flow.chatId`, `$flow.chatflowId`, `$flow.input` - l'entrée initiale qui a démarré le flux de travail - et l'objet `$flow.state` complet. + * ** Variables personnalisées: ** Toutes les variables personnalisées configurées dans Flowise - par exemple, `$vars.<variable-name>`. + * ** Bibliothèques: ** La fonction peut utiliser toutes les bibliothèques qui ont été importées et rendues disponibles dans l'environnement backend Flowise. ** La fonction doit renvoyer une valeur de chaîne à la fin de son exécution **. +* ** Paramètres de configuration ** + * ** Variables d'entrée **: Configurez un tableau de définitions d'entrée qui seront transmises sous forme de variables dans la portée de votre fonction JavaScript. 
Pour chaque variable que vous souhaitez définir, vous spécifierez: + * ** Nom de la variable **: le nom que vous utiliserez pour vous référer à cette variable dans votre code JavaScript, généralement préfixé avec un- par exemple, si vous entrez`myValue`Ici, vous pourriez y accéder comme`$myValue`Dans le script, correspondant à la façon dont les propriétés du schéma d'entrée sont mappées. + * ** Valeur variable **: les données réelles à affecter à cette variable, qui peut être un texte statique ou, plus souvent, une valeur dynamique provenant du flux de travail - par exemple,`{{ previousNode.output }}`ou`{{ $flow.state.someKey }}`. + * ** Fonction JavaScript **: le champ de l'éditeur de code où la fonction JavaScript côté serveur est écrite. Cette fonction doit finalement renvoyer une valeur de chaîne. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de chaîne de ce nœud de fonction personnalisé sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** reçoit des données via les variables configurées dans`Input Variables`. Peut également accéder implicitement aux éléments du`$flow`contexte et`$vars`. +* ** Sorties: ** produit la valeur de chaîne renvoyée par la fonction JavaScript exécutée. + +
    + +*** + +### ** 14. Exécuter le nœud de flux ** + +Permet l'invocation et l'exécution d'un autre ChatFlow Flowise complet ou d'agentflow à partir du flux de travail actuel. + +* ** Fonctionnalité: ** Ce nœud fonctionne comme un appelant sous-travail, faisant la promotion de la conception modulaire et de la réutilisabilité de la logique. Il permet au flux de travail actuel de déclencher un flux de travail préexistant séparé - identifié par son nom ou son ID dans l'instance FLUSED - passez une entrée initiale à lui, remplace éventuellement des configurations spécifiques du flux cible pour cette exécution particulière, puis reçoit sa sortie finale dans le flux de travail d'appel pour continuer le traitement. +* ** Paramètres de configuration ** + * ** Connectez les informations d'identification **: Fournissez éventuellement les informations d'identification de l'API ChatFlow si le flux cible étant appelé nécessite une authentification spécifique ou des autorisations d'exécution. + * ** Sélectionnez Flow **: Spécifiez le ChatFlow ou AgentFlow particulier que ce nœud exécutera à partir de la liste des flux disponibles dans votre instance Flowise. + * ** Entrée **: Définissez les données - Texte statique ou`{{ variable }}`- qui sera transmis comme entrée principale au workflow cible lorsqu'il sera invoqué. + * ** Remplacez la configuration **: Fournissez éventuellement un objet JSON contenant des paramètres qui remplaceront la configuration par défaut du flux de travail cible spécifiquement pour cette instance d'exécution - par exemple, modifiant temporairement un modèle ou une invite utilisée dans le sous-flux. + * ** URL de base **: Spécifiez éventuellement une URL de base alternative pour l'instance fluide qui héberge le flux cible. Ceci est utile dans les configurations distribuées ou lorsque les débits sont accessibles via différents itinéraires, défautant à l'URL de l'instance actuelle si ce n'est pas défini. 
+ * ** RETOUR RÉPONSE AS **: Déterminez comment la sortie finale du sous-flux exécuté doit être classée lorsqu'elle est retournée au flux de travail actuel - en tant que`User Message`ou`Assistant Message`. + * ** Mettre à jour l'état de flux **: permet au nœud de modifier l'état d'exécution du workflow`$flow.state`Pendant l'exécution en mettant à jour les clés prédéfinies. Cela permet, par exemple, de stocker la sortie de cette exécution du nœud de flux sous une telle clé, ce qui le rend accessible aux nœuds suivants. +* ** Entrées: ** nécessite la sélection d'un flux cible et du`Input`données pour cela. +* ** sorties: ** produit la sortie finale renvoyée par le flux de travail cible exécuté, formaté en fonction du`Return Response As`paramètre. + +
    + +## Comprendre l'état de flux + +Une caractéristique architecturale clé permettant la flexibilité et les capacités de gestion des données d'agentflow v2 est l'état de flux ** **. Ce mécanisme fournit un moyen de gérer et de partager les données dynamiquement tout au long de l'exécution d'une seule instance de workflow. + +### ** Qu'est-ce que l'état de flux? ** + +* État de flux (`$flow.state`) est un ** Runtime, Key-Value Store ** qui est partagé entre les nœuds en une seule exécution. +* Il fonctionne comme une mémoire temporaire ou un contexte partagé qui existe uniquement pour la durée de cette exécution / exécution particulière. + +### ** But de l'état de flux ** + +Le but principal de`$flow.state`est d'activer ** le partage et la communication explicites de données entre les nœuds, en particulier ceux qui peuvent ne pas être directement connectés ** dans le graphique de workflow, ou lorsque les données doivent être intentionnellement persistantes et modifiées sur plusieurs étapes. Il relève plusieurs défis d'orchestration courants: + +1. ** transmettre des données sur les branches: ** Si un flux de travail se divise dans les chemins conditionnels, les données générées ou mises à jour dans une branche peuvent être stockées dans`$flow.state`pour accéder plus tard si les chemins fusionnent ou si d'autres branches ont besoin de ces informations. +2. ** L'accès aux données sur des étapes non adjacentes: ** Les informations initialisées ou mises à jour par un nœud précoce peuvent être récupérées par un nœud beaucoup plus tard sans avoir à le passer explicitement via les entrées et sorties de chaque nœud intermédiaire. + +### ** Comment fonctionne l'état de flux ** + +1. ** Initialisation / Déclaration des clés ** + * Toutes les clés d'état qui seront utilisées tout au long du workflow ** doivent être initialisées ** avec leurs valeurs par défaut (même si vides) en utilisant le`Flow State`Paramètre dans le nœud ** de démarrage **. 
Cette étape déclare efficacement le schéma ou la structure de votre`$flow.state`pour ce flux de travail. Vous définissez ici les paires de valeurs clés initiales. + +
    + +2. ** Mise à jour de l'état / Modification des clés existantes ** + +* De nombreux nœuds opérationnels - par exemple,`LLM`, `Agent`, `Tool`, `HTTP`, `Retriever`, `Custom Function`- incluent un paramètre`Update Flow State`dans leur configuration. +* Ce paramètre permet au nœud ** de modifier les valeurs des clés préexistantes ** à l'intérieur de`$flow.state`. +* La valeur peut être du texte statique, la sortie directe du nœud actuel, la sortie du nœud précédent et de nombreuses autres variables. Taper`{{`affichera toutes les variables disponibles. +* Lorsque le nœud s'exécute avec succès, il ** met à jour ** la ou les clés spécifiées dans`$flow.state`avec la (les) valeur(s). ** Les nouvelles clés ne peuvent pas être créées par des nœuds opérationnels ; seules les clés prédéfinies peuvent être mises à jour. ** + + + +3. ** Lire de l'état ** + +* Tout paramètre d'entrée de nœud qui accepte les variables peut lire les valeurs de l'état de flux. +* Utilisez la syntaxe spécifique:`{{ $flow.state.yourKey }}`- remplacez`yourKey`par le nom de clé réel qui a été initialisé dans le nœud de démarrage. +* Par exemple, l'invite d'un nœud LLM peut inclure`"...based on the user status: {{ $flow.state.customerStatus }}"`. + + + +### ** Portée et persistance: ** + +* Il est créé et initialisé lorsqu'une exécution de workflow commence et est détruit lorsque cette exécution spécifique se termine. +* Il ne persiste ** pas ** dans différentes sessions utilisateur ni dans des exécutions séparées du même flux de travail. +* Chaque exécution simultanée du flux de travail maintient son propre`$flow.state`. 
+ +## Ressources vidéo + +{% embed url = "https://youtu.be/slvvduibibe?si=VU1M_BTFDZVNL-PP"%} + +{% embed url = "https://youtu.be/h9n9wcrp9u4?si=8-9a9fktpxaykxxh"%} diff --git a/fr/using-flowise/analytics/Langwatch.md b/fr/using-flowise/analytics/Langwatch.md new file mode 100644 index 00000000..8650292e --- /dev/null +++ b/fr/using-flowise/analytics/Langwatch.md @@ -0,0 +1,33 @@ +--- +description: Learn how to setup LangWatch to analyze and troubleshoot your chatflows and agentflows +--- + +# Langwatch + +--- + +[Langwatch](https://langwatch.ai)est une observabilité de qualité de production et une plate-forme LLMOPS conçue pour surveiller, déboguer et améliorer les applications LLM et les agents d'IA à grande échelle. + +## Installation + +1. Dans le coin supérieur droit de votre ChatFlow ou AgentFlow, cliquez sur ** Paramètres **> ** Configuration ** + + Capture d'écran de l'utilisateur cliquant dans le menu de configuration
    + +2. Ensuite, allez à la section Analyser Chatflow + +
    Capture d'écran de la section Analyze Chatflow avec les différents fournisseurs d'analyses
    + +3. Vous verrez une liste de fournisseurs, ainsi que leurs champs de configuration. Cliquez sur Langwatch. + + Capture d'écran d'un fournisseur d'analyse avec des champs d'identification étendus
    + +4. Si vous ne l'avez pas déjà fait, inscrivez-vous pour un compte gratuit [ici](https://app.langwatch.ai) pour obtenir votre clé API. + +5. Remplissez les détails de la configuration, puis activez le fournisseur (**ON**) et cliquez sur **Enregistrer** + + Capture d'écran des fournisseurs d'analyse activés
+
+6. Vous pouvez désormais utiliser Langwatch pour analyser et dépanner vos chatflows et vos flux d'agents. Reportez-vous au [guide officiel](https://docs.langwatch.ai) pour plus de détails.
+
<figure><img src="" alt="Capture d'écran des fournisseurs d'analyse activés"><figcaption></figcaption></figure> <!-- TODO: image src lost during translation; restore the path from the English source document -->
    + +En outre, il existe également plusieurs fournisseurs d'analyse qui circulent avec: + +* [LunaryAI](https://lunary.ai/) +* [Langsmith](https://smith.langchain.com/) +* [Langfuse](https://langfuse.com/) +* [LangWatch](https://langwatch.ai/) +* [Arize](https://arize.com/) +* [Phoenix](https://phoenix.arize.com/) +* [Opik](https://www.comet.com/site/products/opik/) + +## Installation + +1. Dans le coin supérieur droit de votre ChatFlow ou AgentFlow, cliquez sur ** Paramètres **> ** Configuration ** + + Capture d'écran de l'utilisateur cliquant dans le menu de configuration
    + +2. Ensuite, allez à la section Analyser Chatflow + +
    Capture d'écran de la section Analyze Chatflow avec les différents fournisseurs d'analyses
    + +3. Vous verrez une liste des fournisseurs, ainsi que leurs champs de configuration + + Capture d'écran d'un fournisseur d'analyse avec des champs d'identification étendus
    + +4. Remplissez les informations d'identification et autres détails de configuration, puis activez le fournisseur (**ON**). Cliquez sur Enregistrer. + + Capture d'écran des fournisseurs d'analyse activés
    + +## API + +Une fois l'analytique allumé de l'interface utilisateur, vous pouvez remplacer ou fournir une configuration supplémentaire dans le corps du[Prediction API](api.md#prediction-api): + +```json +{ + "question": "hi there", + "overrideConfig": { + "analytics": { + "langFuse": { + // langSmith, langFuse, lunary, langWatch, opik + "userId": "user1" + } + } + } +} +``` diff --git a/fr/using-flowise/analytics/arize.md b/fr/using-flowise/analytics/arize.md new file mode 100644 index 00000000..16c293d3 --- /dev/null +++ b/fr/using-flowise/analytics/arize.md @@ -0,0 +1,31 @@ +--- +description: Learn how to setup Arize to analyze and troubleshoot your chatflows and agentflows +--- + +# Se manifester + +*** + +[Arize AI](https://docs.arize.com/arize)est une plate-forme d'observabilité de qualité de production pour surveiller, déboguer et améliorer les applications LLM et les agents d'IA à grande échelle. Pour une alternative libre et open source, explorez[Phoenix](https://docs.flowiseai.com/using-flowise/analytics/phoenix). + +## Installation + +1. Dans le coin supérieur droit de votre ChatFlow ou AgentFlow, cliquez sur ** Paramètres **> ** Configuration ** + + Capture d'écran de l'utilisateur cliquant dans le menu de configuration
    + +2. Ensuite, allez à la section Analyser Chatflow + +
    Capture d'écran de la section Analyze Chatflow avec les différents fournisseurs d'analyses
    + +3. Vous verrez une liste de fournisseurs, ainsi que leurs champs de configuration. Cliquez sur Arize. + + Capture d'écran d'un fournisseur d'analyse avec des champs d'identification étendus
    + +4. Créez des informations d'identification pour Arize. Reportez-vous au [guide officiel](https://docs.arize.com/arize/llm-tracing/quickstart-llm#get-your-api-keys) sur la façon d'obtenir la clé API Arize. + + Capture d'écran des fournisseurs d'analyse activés
    + +5. Remplissez les autres détails de configuration, puis activez le fournisseur (**ON**) + + Capture d'écran des fournisseurs d'analyse activés
    diff --git a/fr/using-flowise/analytics/langfuse.md b/fr/using-flowise/analytics/langfuse.md new file mode 100644 index 00000000..1ab78280 --- /dev/null +++ b/fr/using-flowise/analytics/langfuse.md @@ -0,0 +1,11 @@ +# Langfuse + +[Langfuse](https://langfuse.com)est une plate-forme d'ingénierie LLM open source qui aide les équipes à tracer les appels d'API, à surveiller les performances et à déboguer dans leurs applications d'IA. + +Avec l'intégration native, vous pouvez utiliser Flowise pour créer rapidement des applications LLM complexes dans sans code, puis utiliser Langfuse pour les surveiller et les améliorer. + +L'intégration prend en charge tous les cas d'utilisation de Flowise, notamment: interactivement dans l'interface utilisateur, l'API et les intégres. + +{% embed url = "https://youtu.be/ifssw6hhoa0"%} + +Vous pouvez éventuellement ajouter`release`Pour marquer la version actuelle du flux. Vous n'avez généralement pas besoin de modifier les autres options. diff --git a/fr/using-flowise/analytics/lunary.md b/fr/using-flowise/analytics/lunary.md new file mode 100644 index 00000000..1e9db7b7 --- /dev/null +++ b/fr/using-flowise/analytics/lunary.md @@ -0,0 +1,9 @@ +# Lunaire + +[Lunary](https://lunary.ai/)est une plate-forme de surveillance et d'analyse pour les chatbots LLM. + +Flowise s'est associé à Lunary pour fournir une intégration complète prenant en charge le traçage des utilisateurs, le suivi des commentaires, les rediffusions de conversation et l'analyse LLM détaillée. + +Les utilisateurs Flowise peuvent obtenir une réduction de 30% sur le plan des équipes en utilisant le code`FLOWISEFRIENDS`Pendant la caisse. + +En savoir plus sur la façon de configurer Lunary avec Flowise[here](https://lunary.ai/docs/integrations/flowise). 
diff --git a/fr/using-flowise/analytics/opik.md b/fr/using-flowise/analytics/opik.md new file mode 100644 index 00000000..0249f7ca --- /dev/null +++ b/fr/using-flowise/analytics/opik.md @@ -0,0 +1,34 @@ +--- +description: Learn how to setup Opik to analyze and troubleshoot your chatflows and agentflows +--- + +# Opik + +*** + +## Installation + +1. Dans le coin supérieur droit de votre ChatFlow ou AgentFlow, cliquez sur ** Paramètres **> ** Configuration ** + + Capture d'écran de l'utilisateur cliquant dans le menu de configuration
    + +2. Ensuite, allez à la section Analyser Chatflow + +
    Capture d'écran de la section Analyze Chatflow avec les différents fournisseurs d'analyses
    + +3. Vous verrez une liste de fournisseurs, ainsi que leurs champs de configuration. Cliquez sur Opik. + + Capture d'écran d'un fournisseur d'analyse avec des champs d'identification étendus
    + +4. Créez des informations d'identification pour Opik. Reportez-vous au [guide officiel](https://www.comet.com/docs/opik/tracing/sdk_configuration) sur la façon d'obtenir la clé API Opik. + + Capture d'écran des fournisseurs d'analyse activés
    + +5. Remplissez les autres détails de configuration, puis allumez le fournisseur ** sur ** + +
    Capture d'écran des fournisseurs d'analyse activés
    + + +Vous pouvez maintenant analyser vos chatflows et vos Agentflows à l'aide de l'interface utilisateur OPIK: + + Capture d'écran d'Opik UI
    \ No newline at end of file diff --git a/fr/using-flowise/analytics/phoenix.md b/fr/using-flowise/analytics/phoenix.md new file mode 100644 index 00000000..8af8f71d --- /dev/null +++ b/fr/using-flowise/analytics/phoenix.md @@ -0,0 +1,31 @@ +--- +description: Learn how to setup Phoenix to analyze and troubleshoot your chatflows and agentflows +--- + +# Phénix + +*** + +[Phoenix](https://docs.arize.com/phoenix/self-hosting)est un outil d'observabilité open source conçu pour l'expérimentation, l'évaluation et le dépannage des applications AI et LLM. Il peut être un accès dans son[Cloud](https://app.phoenix.arize.com/login)formulairez en ligne ou auto-hébergé et exécutez sur votre propre machine ou serveur. + +## Installation + +1. Dans le coin supérieur droit de votre ChatFlow ou AgentFlow, cliquez sur ** Paramètres **> ** Configuration ** + + Capture d'écran de l'utilisateur cliquant dans le menu de configuration
    + +2. Ensuite, allez à la section Analyser Chatflow + +
    Capture d'écran de la section Analyze Chatflow avec les différents fournisseurs d'analyses
    + +3. Vous verrez une liste de fournisseurs, ainsi que leurs champs de configuration. Cliquez sur Phoenix. + + Capture d'écran d'un fournisseur d'analyse avec des champs d'identification étendus
    + +4. Créez des informations d'identification pour Phoenix. Reportez-vous au [guide officiel](https://docs.arize.com/phoenix/environments) sur la façon d'obtenir la clé API Phoenix. + + Capture d'écran des fournisseurs d'analyse activés
    + +5. Remplissez les autres détails de configuration, puis activez le fournisseur (**ON**). Cliquez sur Enregistrer. + + Capture d'écran des fournisseurs d'analyse activés
    diff --git a/fr/using-flowise/document-stores.md b/fr/using-flowise/document-stores.md new file mode 100644 index 00000000..354ad3f4 --- /dev/null +++ b/fr/using-flowise/document-stores.md @@ -0,0 +1,1036 @@ +--- +description: Learn how to use the Flowise Document Stores, written by @toi500 +--- + +# Magasins de documents + +*** + +Les magasins de documents de Flowise offrent une approche polyvalente de la gestion des données, vous permettant de télécharger, de fendre et de préparer votre ensemble de données et de le mettre en œuvre en un seul endroit. + +Cette approche centralisée simplifie la gestion des données et permet une gestion efficace de divers formats de données, ce qui facilite l'organisation et l'accès à vos données dans l'application Flowise. + +## Installation + +Dans ce tutoriel, nous installerons un[Retrieval Augmented Generation (RAG)](broken-reference/)Système pour récupérer des informations sur la politique de maison de luxe _LiberTyGuard Policy_, un sujet sur lequel les LLM ne sont pas largement formées. + +En utilisant les magasins de documents ** Flowise **, nous préparerons et améliorerons les données sur LibertyGuard et son ensemble de polices d'assurance habitation. Cela permettra à notre système de chiffon de répondre avec précision aux requêtes des utilisateurs sur les offres d'assurance habitation de Libertyguard. + +## 1. Ajouter une boutique de documents + +Commencez par ajouter un magasin de documents et le nommer. Dans notre cas, "Politique des propriétaires de Libertyguard Deluxe". + +
    + +## 2. Sélectionnez un chargeur de documents + +Entrez le magasin de documents que vous venez de créer et sélectionnez le[Document Loader](../integrations/langchain/document-loaders/)vous souhaitez utiliser. Dans notre cas, puisque notre ensemble de données est au format PDF, nous utiliserons le[PDF Loader](../integrations/langchain/document-loaders/pdf-file.md). + +Les chargeurs de documents sont des nœuds spécialisés qui gèrent l'ingestion de divers formats de documents. + +
    + +
    + +## 3. Préparez vos données + +### Étape 1: chargeur de documents + +* Tout d'abord, nous commençons par télécharger notre fichier PDF. +* Ensuite, nous ajoutons une ** clé de métadonnées uniques **. Ceci est facultatif, mais une bonne pratique car elle nous permet de cibler et de filtrer ce même ensemble de données plus tard si nous en avons besoin. +* Chaque chargeur est livré avec des métadonnées préconfigurées, dans certains cas, vous pouvez utiliser des clés de métadonnées omit pour éliminer les métadonnées inutiles. + +
    + +### Étape 2: séparateur de texte + +* Sélectionnez le[Text Splitter](../integrations/langchain/text-splitters/)Vous souhaitez utiliser pour sélectionner vos données. Dans notre cas particulier, nous utiliserons le[Recursive Character Text Splitter](../integrations/langchain/text-splitters/recursive-character-text-splitter.md). +* Le séparateur de texte est utilisé pour diviser les documents chargés en pièces, documents ou morceaux plus petits. Il s'agit d'une étape de prétraitement cruciale pour 2 raisons principales: + + * ** Vitesse et pertinence de récupération: ** Le stockage et l'interrogation de gros documents en tant qu'entités uniques dans une base de données vectorielle peuvent conduire à des temps de récupération plus lents et à des résultats potentiellement moins pertinents. La division du document en morceaux plus petits permet une récupération plus ciblée. En interrogeant contre des unités d'information plus petites et plus ciblées, nous pouvons atteindre des temps de réponse plus rapides et améliorer la précision des résultats récupérés. + * ** RETENDANT: ** Puisque nous ne récupérons que des morceaux pertinents plutôt que le document entier, le nombre de jetons traités par le LLM est considérablement réduit. Cette approche de récupération ciblée se traduit directement par une baisse des coûts d'utilisation de notre LLM, car la facturation est généralement basée sur la consommation de jetons. En minimisant la quantité d'informations non pertinentes envoyées à la LLM, nous optimisons également pour le coût. + +Il existe différentes stratégies de section de texte, notamment: + + * ** Clissage du texte des caractères: ** Divide le texte en morceaux d'un nombre fixe de caractères. Cette méthode est simple mais peut diviser des mots ou des phrases sur des morceaux, perturbant potentiellement le contexte. 
+ * ** Diffusion du texte de jeton: ** Segmentation du texte en fonction des limites des mots ou des schémas de tokenisation spécifiques au modèle d'intégration choisi. Cette approche conduit souvent à des morceaux plus cohérents sémantiquement, car il préserve les limites des mots et considère la structure linguistique sous-jacente du texte. + * ** Diffusion du texte récursif du caractère: ** Cette stratégie vise à diviser le texte en morceaux qui maintiennent la cohérence sémantique tout en restant dans une limite de taille spécifiée. Il est particulièrement bien adapté aux documents hiérarchiques avec des sections ou des titres imbriqués. Au lieu de se diviser aveuglément à la limite de caractère, il analyse récursivement le texte pour trouver des points d'arrêt logiques, tels que les fins de phrase ou les ruptures de section. Cette approche garantit que chaque morceau représente une unité d'information significative, même si elle dépasse légèrement la taille cible. + * ** Splitter de texte de marque: ** Conçu spécifiquement pour les documents formulés Markdown, ce séparateur segmente logiquement le texte basé sur des en-têtes de démarque et des éléments structurels, créant des morceaux qui correspondent à des sections logiques dans le document. + * ** Splitter de texte de code: ** Adapté pour la division des fichiers de code, cette stratégie considère la structure du code, les définitions de fonction et d'autres éléments spécifiques au langage de programmation pour créer des morceaux significatifs qui conviennent aux tâches telles que la recherche et la documentation de code. + * ** Splitter de texte HTML à markdown: ** Ce séparateur spécialisé convertit d'abord le contenu HTML à Markdown, puis applique le séparateur de texte Markdown, permettant une segmentation structurée des pages Web et d'autres documents HTML. 
+ +Vous pouvez également personnaliser les paramètres tels que: + + * ** Taille du morceau: ** La taille maximale souhaitée de chaque morceau, généralement définie en caractères ou en jetons. + * ** chevauchement de morceaux: ** Le nombre de caractères ou de jetons à chevaucher entre des morceaux consécutifs, utile pour maintenir le flux contextuel à travers des morceaux. + +{% hint style = "info"%} +Dans ce guide, nous avons ajouté une généreuse taille ** de chevauchement ** pour nous assurer qu'aucune donnée pertinente ne manque de morceaux. Cependant, la taille optimale du chevauchement dépend de la complexité de vos données. Vous devrez peut-être ajuster cette valeur en fonction de votre ensemble de données spécifique et de la nature des informations que vous souhaitez extraire. +{% EndHint%} + +
    + +## 4. Aperçu de vos données + +Nous pouvons désormais prévisualiser comment nos données seront ouvertes en utilisant notre actuel[Text Splitter](../integrations/langchain/text-splitters/)configuration;`chunk_size=1500`et`chunk_overlap=750`. + +
    + +Il est important d'expérimenter avec différents[Text Splitters](../integrations/langchain/text-splitters/), Tailles de morceaux et se chevaucher des valeurs pour trouver la configuration optimale pour votre ensemble de données spécifique. Cet aperçu vous permet d'affiner le processus de chasse et de vous assurer que les morceaux résultants conviennent à votre système de chiffon. + +
    + +{% hint style = "info"%} +Notez que nos métadonnées personnalisées`company: "liberty"`a été inséré dans chaque morceau. Ces métadonnées nous permettent de filtrer et de récupérer facilement des informations à partir de cet ensemble de données spécifiques plus tard, même si nous utilisons le même index de magasin vectoriel pour d'autres ensembles de données. +{% EndHint%} + +### Comprendre le chevauchement de morceaux + +Dans le contexte de la récupération basée sur les vecteurs et de la requête LLM, le chevauchement de morceaux joue un ** rôle important dans le maintien de la continuité contextuelle ** et ** Amélioration de la précision de la réponse **, en particulier lorsqu'il s'agit d'une profondeur de récupération limitée ou ** Top K **, qui est le paramètre qui détermine le nombre maximum de la plupart des morceaux similaires qui sont récupérés à partir de la[Vector Store](https://docs.flowiseai.com/integrations/langchain/vector-stores)en réponse à une requête. + +Pendant le traitement des requêtes, le LLM exécute une recherche de similitude contre le magasin vectoriel pour récupérer les morceaux les plus pertinents sémantiquement à la requête donnée. Si la profondeur de récupération, représentée par le paramètre K supérieur, est définie sur une petite valeur, 4 pour par défaut, le LLM utilise initialement des informations uniquement à partir de ces 4 morceaux pour générer sa réponse. + +Ce scénario nous présente un problème, car le fait de s'appuyer uniquement sur un nombre limité de morceaux sans chevauchement peut entraîner des réponses incomplètes ou inexactes, en particulier lorsqu'ils traitent des requêtes qui nécessitent des informations couvrant plusieurs morceaux. + +Le chevauchement des morceaux aide à ce problème en s'assurant qu'une partie du contexte textuel est partagée sur des morceaux consécutifs, ** augmentant la probabilité que toutes les informations pertinentes pour une requête donnée soient contenues dans les morceaux récupérés **. 
+ +En d'autres termes, ce chevauchement sert de pont entre des morceaux, permettant au LLM d'accéder à une fenêtre contextuelle plus large même lorsqu'elle est limitée à un petit ensemble de morceaux récupérés (haut K). Si une requête est liée à un concept ou à une information qui s'étend au-delà d'un seul morceau, les régions qui se chevauchent augmentent la probabilité de capturer tout le contexte nécessaire. + +Par conséquent, en introduisant un chevauchement de morceaux pendant la phase de division du texte, nous améliorons la capacité du LLM à: + +1. ** Préserver la continuité contextuelle: ** Les morceaux qui se chevauchent fournissent une transition plus fluide des informations entre les segments consécutifs, permettant au modèle de maintenir une compréhension plus cohérente du texte. +2. ** Améliorer la précision de la récupération: ** En augmentant la probabilité de capturer toutes les informations pertinentes dans le top k cible k récupéré, le chevauchement contribue à des réponses plus précises et plus appropriées. + +### Précision vs coût + +Ainsi, pour optimiser davantage le compromis entre la précision de la récupération et le coût, deux stratégies primaires peuvent être utilisées: + +1. ** Le chevauchement d'augmentation / diminution du morceau: ** L'ajustement du pourcentage de chevauchement pendant la division de texte permet un contrôle à grain fin sur la quantité de contexte partagé entre les morceaux. Des pourcentages de chevauchement plus élevés entraînent généralement une amélioration de la préservation du contexte, mais peuvent également augmenter les coûts, car vous devez utiliser plus de morceaux pour englober l'ensemble du document. À l'inverse, des pourcentages de chevauchement plus faibles peuvent réduire les coûts, mais risquent de perdre des informations contextuelles clés entre les morceaux, conduisant potentiellement à des réponses moins précises ou incomplètes du LLM. +2. 
** Augmentation / diminution du top k: ** L'augmentation de la valeur K supérieure par défaut (4) élargit le nombre de morceaux considérés pour la génération de réponse. Bien que cela puisse améliorer la précision, cela augmente également les coûts. + +** Astuce: ** Le choix des valeurs optimales ** des chevauchement ** et ** Top K ** dépend de facteurs tels que la complexité du document, les caractéristiques du modèle d'intégration et l'équilibre souhaité entre la précision et le coût. L'expérimentation avec ces valeurs est importante pour trouver la configuration idéale pour un besoin spécifique. + +## 5. Traitez vos données + +Une fois que vous êtes satisfait du processus de chasse, il est temps de traiter vos données. + +
    + +
    + +Après avoir traité vos données, vous conservez la possibilité d'affiner des morceaux individuels en supprimant ou en ajoutant du contenu. Ce contrôle granulaire offre plusieurs avantages: + +* ** Précision améliorée: ** Identifier et rectifier les inexactitudes ou les incohérences présentes dans les données d'origine, garantissant que les informations utilisées dans votre application sont fiables. +* ** Amélioration de la pertinence: ** Affinez le contenu de morceaux pour souligner les informations clés et supprimer des sections non pertinentes, augmentant ainsi la précision et l'efficacité de votre processus de récupération. +* ** Optimisation des requêtes: ** Taigor Chunks pour mieux s'aligner sur les requêtes utilisateur prévues, ce qui les rend plus ciblés et améliore l'expérience utilisateur globale. + +## 6. Configurez le processus Upsert + +Avec nos données correctement traitées - chargées via un chargeur de documents et de manière appropriée - nous pouvons maintenant procéder à la configuration du processus Upsert. + +
    + +Le processus Upsert comprend trois étapes fondamentales: + +* ** Incorporation: ** Nous commençons par choisir le modèle d'intégration approprié pour coder notre ensemble de données. Ce modèle transformera nos données en une représentation vectorielle numérique. +* ** Store vectoriel: ** Ensuite, nous déterminons le magasin vectoriel où résidera notre ensemble de données. +* ** Record Manager (facultatif): ** Enfin, nous avons la possibilité d'implémenter un gestionnaire d'enregistrements. Ce composant fournit les fonctionnalités pour gérer notre ensemble de données une fois qu'il est stocké dans le magasin vectoriel. + +
    + +### Étape 1: Sélectionnez des incorporations + +Cliquez sur la carte "Sélectionner des incorporations" et choisissez votre préférée[embedding model](../integrations/langchain/embeddings/). Dans notre cas, nous sélectionnerons OpenAI comme fournisseur d'incorporation et utiliserons le`text-embedding-ada-002`modéliser avec`1536`dimensions. + +L'intégration est le processus de conversion du texte en une représentation numérique qui capture sa signification. Cette représentation numérique, également appelée vecteur d'intégration, est un tableau de nombres multidimensionnel, où chaque dimension représente un aspect spécifique de la signification du texte. + +Ces vecteurs permettent aux LLM de comparer et de rechercher des morceaux de texte similaires dans le magasin vectoriel en mesurant la distance ou la similitude entre eux dans cet espace multidimensionnel. + +#### Comprendre les dimensions des intégres / vectoriels + +Le nombre de dimensions dans un indice de magasin vectoriel est déterminé par le modèle d'incorporation utilisé lorsque nous augmentons nos données, et vice versa. Chaque dimension représente une fonction ou un concept spécifique dans les données. Par exemple, une ** dimension ** pourrait ** représenter un sujet, un sentiment ou un autre aspect particulier du texte **. + +Plus nous utilisons de dimensions pour intégrer nos données, plus le potentiel de capture de sens nuancé de notre texte est grand. Cependant, cette augmentation se fait au prix des exigences de calcul plus élevées par requête. + +En général, un plus grand nombre de dimensions nécessite plus de ressources pour stocker, traiter et comparer les vecteurs d'intégration résultants. Par conséquent, des modèles intégrés comme le Google`embedding-001`, qui utilise 768 dimensions, sont, en théorie, moins chères que d'autres comme l'Openai`text-embedding-3-large`, avec 3072 dimensions. 
+ +Il est important de noter que la ** relation entre les dimensions et la capture de sens n'est pas strictement linéaire **; Il y a un point de rendement décroissant où l'ajout de dimensions offre un avantage négligeable pour le coût inutile supplémentaire. + +{% hint style = "avertissement"%} +Pour garantir la compatibilité entre un modèle d'incorporation et un indice de magasin vectoriel, l'alignement dimensionnel est essentiel. Les deux ** Le modèle d'intégration et l'indice du magasin vectoriel doivent avoir le même nombre de dimensions **. L'inadéquation de la dimensionnalité entraînera des erreurs de mise en service, car le magasin vectoriel est conçu pour gérer les vecteurs d'une taille spécifique déterminée par le modèle d'incorporation choisi. +{% EndHint%} + +
    + +### Étape 2: Sélectionnez le magasin vectoriel + +Cliquez sur la carte "Sélectionnez Vector Store" et choisissez votre préféré[Vector Store](../integrations/langchain/vector-stores/). Dans notre cas, comme nous avons besoin d'une option prête pour la production, nous sélectionnerons Upstash. + +Vector Store est un type spécial de base de données qui est utilisé pour stocker les incorporations vectorielles. Nous pouvons par des paramètres finetune comme "** top k **" qui détermine le nombre maximum des morceaux les plus similaires qui sont récupérés du magasin vectoriel en réponse à une requête. + +{% hint style = "info"%} +Une valeur K supérieure inférieure donnera des résultats moins, mais potentiellement plus pertinents, tandis qu'une valeur plus élevée renverra une gamme plus large de résultats, capturant potentiellement plus d'informations. +{% EndHint%} + +
    + +### Étape 3: Sélectionnez Record Manager + +Record Manager est un ajout facultatif mais incroyablement utile à notre flux de mise en valeur. Il nous permet de maintenir des enregistrements de tous les morceaux qui ont été renversés vers notre magasin vectoriel, nous permettant d'ajouter ou de supprimer efficacement des morceaux au besoin. + +En d'autres termes, toute modification de vos documents lors d'un nouvel upsert n'entraînera pas les intérêts de vecteur en double stockés dans le magasin vectoriel. + +Des instructions détaillées sur la façon de configurer et d'utiliser cette fonctionnalité peuvent être trouvées dans le dédié[guide](../integrations/langchain/record-managers.md). + +
    + +## 7. Upser vos données à un magasin vectoriel + +Pour commencer le processus Upsert et transférer vos données dans le magasin vectoriel, cliquez sur le bouton "Upsert". + +
    + +Comme illustré dans l'image ci-dessous, nos données ont été déposées avec succès dans la base de données Vector Upstash. Les données ont été divisées en 85 morceaux pour optimiser le processus de mise en service et assurer un stockage et une récupération efficaces. + +
    + +## 8. Testez votre ensemble de données + +Pour tester rapidement les fonctionnalités de votre ensemble de données sans vous éloigner du magasin de documents, utilisez simplement le bouton "Retrouver Requête". Cela initie une requête de test, vous permettant de vérifier la précision et l'efficacité de votre processus de récupération de données. + +
    + +Dans notre cas, nous voyons que lorsque vous interrogez pour des informations sur la couverture des revêtements de sol de la cuisine dans notre police d'assurance, nous récupérons 4 morceaux pertinents de Upstash, notre magasin vectoriel désigné. Cette récupération est limitée à 4 morceaux selon le paramètre "Top K" défini, garantissant que nous recevons les informations les plus pertinentes sans redondance inutile. + +
    + +## 9. Testez votre chiffon + +Enfin, notre système de génération (RAG) (RAG) de la récupération est opérationnel. Il convient de noter comment le LLM interprète efficacement la requête et exploite avec succès les informations pertinentes des données en morceaux pour construire une réponse complète. + +#### Agent Flow + +Avec un nœud d'agent, vous pouvez ajouter la boutique de documents: + +
    + +
    + +Ou se connecter directement à la base de données vectorielle et au mode d'intégration: + +
    + +#### Chatflow + +Vous pouvez utiliser le magasin vectoriel configuré plus tôt: + +
    + +Ou utilisez la boutique de documents (vecteur): + +
    + +## 10. API + +Il existe également une prise en charge des API pour la création, la mise à jour et la suppression de la boutique de documents. Dans cette section, nous allons mettre en évidence les 2 des API les plus utilisées: + +* Ascension +* Rafraîchir + +Pour plus de détails, voir le[Document Store API Reference](../api-reference/document-store.md). + +### API Upsert + +Il existe quelques scénarios différents pour l'amélioration du processus, et chacun a des résultats différents. + +#### Scénario 1: Dans le même magasin de documents, utilisez une configuration de chargeur de document existante, Upsert comme nouveau chargeur de documents. + +
    + +{% Hint Style = "Success"%} +**`docId`** représente l'ID de chargeur de document existant. Il est nécessaire dans le corps de la demande pour ce scénario. +{% EndHint%} + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +import json + +DOC_STORE_ID = "your_doc_store_id" +DOC_LOADER_ID = "your_doc_loader_id" +API_URL = f"http://localhost:3000/api/v1/document-store/upsert/{DOC_STORE_ID}" +API_KEY = "your_api_key_here" + +form_data = { + "files": ('my-another-file.pdf', open('my-another-file.pdf', 'rb')) +} + +body_data = { + "docId": DOC_LOADER_ID +} + +headers = { + "Authorization": f"Bearer {BEARER_TOKEN}" +} + +def query(form_data): + response = requests.post(API_URL, files=form_data, data=body_data, headers=headers) + print(response) + return response.json() + +output = query(form_data) +print(output) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +const DOC_STORE_ID = "your_doc_store_id" +const DOC_LOADER_ID = "your_doc_loader_id" + +let formData = new FormData(); +formData.append("files", input.files[0]); +formData.append("docId", DOC_LOADER_ID) + +async function query(formData) { + const response = await fetch( + `http://localhost:3000/api/v1/document-store/upsert/${DOC_STORE_ID}`, + { + method: "POST", + headers: { + "Authorization": "Bearer " + }, + body: formData + } + ); + const result = await response.json(); + return result; +} + +query(formData).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +#### Scénario 2: Dans le même magasin de documents, remplacez un chargeur de document existant par de nouveaux fichiers. + +
    + +{% Hint Style = "Success"%} +**`docId`** et **`replaceExisting`** sont tous deux requis dans le corps de la demande pour ce scénario. +{% EndHint%} + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +import json + +DOC_STORE_ID = "your_doc_store_id" +DOC_LOADER_ID = "your_doc_loader_id" +API_URL = f"http://localhost:3000/api/v1/document-store/upsert/{DOC_STORE_ID}" +API_KEY = "your_api_key_here" + +form_data = { + "files": ('my-another-file.pdf', open('my-another-file.pdf', 'rb')) +} + +body_data = { + "docId": DOC_LOADER_ID, + "replaceExisting": True +} + +headers = { + "Authorization": f"Bearer {BEARER_TOKEN}" +} + +def query(form_data): + response = requests.post(API_URL, files=form_data, data=body_data, headers=headers) + print(response) + return response.json() + +output = query(form_data) +print(output) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +const DOC_STORE_ID = "your_doc_store_id"; +const DOC_LOADER_ID = "your_doc_loader_id"; + +let formData = new FormData(); +formData.append("files", input.files[0]); +formData.append("docId", DOC_LOADER_ID); +formData.append("replaceExisting", true); + +async function query(formData) { + const response = await fetch( + `http://localhost:3000/api/v1/document-store/upsert/${DOC_STORE_ID}`, + { + method: "POST", + headers: { + "Authorization": "Bearer " + }, + body: formData + } + ); + const result = await response.json(); + return result; +} + +query(formData).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +#### Scénario 3: dans le même magasin de documents, Upsert que le nouveau chargeur de documents à partir de zéro. + +
    + +{% Hint Style = "Success"%} +**`loader`, `splitter`, `embedding`, `vectorStore`** sont tous requis dans le corps de la demande pour ce scénario. **`recordManager`** est facultatif. +{% EndHint%} + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +import json + +DOC_STORE_ID = "your_doc_store_id" +API_URL = f"http://localhost:3000/api/v1/document-store/upsert/{DOC_STORE_ID}" +API_KEY = "your_api_key_here" + +form_data = { + "files": ('my-another-file.pdf', open('my-another-file.pdf', 'rb')) +} + +loader = { + "name": "pdfFile", + "config": {} # you can leave empty to use default config +} + +splitter = { + "name": "recursiveCharacterTextSplitter", + "config": { + "chunkSize": 1400, + "chunkOverlap": 100 + } +} + +embedding = { + "name": "openAIEmbeddings", + "config": { + "modelName": "text-embedding-ada-002", + "credential": + } +} + +vectorStore = { + "name": "pinecone", + "config": { + "pineconeIndex": "exampleindex", + "pineconeNamespace": "examplenamespace", + "credential":
    + +{% Hint Style = "Success"%} +**`createNewDocStore`** et **`docStore`** sont tous deux requis dans le corps de la demande pour ce scénario. +{% EndHint%} + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +import json + +DOC_STORE_ID = "your_doc_store_id" +DOC_LOADER_ID = "your_doc_loader_id" +API_URL = f"http://localhost:3000/api/v1/document-store/upsert/{DOC_STORE_ID}" +API_KEY = "your_api_key_here" + +form_data = { + "files": ('my-another-file.pdf', open('my-another-file.pdf', 'rb')) +} + +body_data = { + "docId": DOC_LOADER_ID, + "createNewDocStore": True, + "docStore": json.dumps({"name":"My NEW Doc Store"}) +} + +headers = { + "Authorization": f"Bearer {BEARER_TOKEN}" +} + +def query(form_data): + response = requests.post(API_URL, files=form_data, data=body_data, headers=headers) + print(response) + return response.json() + +output = query(form_data) +print(output) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +const DOC_STORE_ID = "your_doc_store_id"; +const DOC_LOADER_ID = "your_doc_loader_id"; + +let formData = new FormData(); +formData.append("files", input.files[0]); +formData.append("docId", DOC_LOADER_ID); +formData.append("createNewDocStore", true); +formData.append("docStore", JSON.stringify({ "name": "My NEW Doc Store" })); + +async function query(formData) { + const response = await fetch( + `http://localhost:3000/api/v1/document-store/upsert/${DOC_STORE_ID}`, + { + method: "POST", + headers: { + "Authorization": "Bearer " + }, + body: formData + } + ); + const result = await response.json(); + return result; +} + +query(formData).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +#### Q: Où trouver l'ID de la boutique de documents et l'ID de chargeur de documents? + +R: Vous pouvez trouver les ID respectifs de l'URL. + +
    + +#### Q: Où puis-je trouver les configurations disponibles à remplacer? + +R: Vous pouvez trouver les configurations disponibles à partir du bouton **View API** sur chaque chargeur de document: + +
    + +
    + +Pour chaque usiser, il y a 5 éléments impliqués: + +* **`loader`** +* **`splitter`** +* **`embedding`** +* **`vectorStore`** +* **`recordManager`** + +Vous pouvez remplacer la configuration existante avec le **`config`** Corps de l'élément. Par exemple, en utilisant la capture d'écran ci-dessus, vous pouvez créer un nouveau chargeur de document avec un nouveau **`url`**: + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +API_URL = "http://localhost:3000/api/v1/document-store/upsert/" + +def query(payload): + response = requests.post(API_URL, json=payload) + return response.json() + +output = query({ + "docId": , + # override existing configuration + "loader": { + "config": { + "url": "https://new-url.com" + } + } +}) +print(output) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/document-store/upsert/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "docId": , + // override existing configuration + "loader": { + "config": { + "url": "https://new-url.com" + } + } +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +Et si le chargeur a le téléchargement de fichiers? Oui, vous l'avez bien deviné, nous devons utiliser les données de formulaire comme corps! + +En utilisant l'image ci-dessous comme exemple, nous pouvons remplacer le **`usage`** Paramètre du chargeur de fichiers PDF comme tel: + +
    + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +import json + +API_URL = "http://localhost:3000/api/v1/document-store/upsert/" +API_KEY = "your_api_key_here" + +form_data = { + "files": ('my-another-file.pdf', open('my-another-file.pdf', 'rb')) +} + +override_loader_config = { + "config": { + "usage": "perPage" + } +} + +body_data = { + "docId": , + "loader": json.dumps(override_loader_config) # Override existing configuration +} + +headers = { + "Authorization": f"Bearer {BEARER_TOKEN}" +} + +def query(form_data): + response = requests.post(API_URL, files=form_data, data=body_data, headers=headers) + print(response) + return response.json() + +output = query(form_data) +print(output) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +const DOC_STORE_ID = "your_doc_store_id"; +const DOC_LOADER_ID = "your_doc_loader_id"; + +const overrideLoaderConfig = { + "config": { + "usage": "perPage" + } +} + +let formData = new FormData(); +formData.append("files", input.files[0]); +formData.append("docId", DOC_LOADER_ID); +formData.append("loader", JSON.stringify(overrideLoaderConfig)); + +async function query(formData) { + const response = await fetch( + `http://localhost:3000/api/v1/document-store/upsert/${DOC_STORE_ID}`, + { + method: "POST", + headers: { + "Authorization": "Bearer " + }, + body: formData + } + ) + const result = await response.json(); + return result; +} + +query(formData).then((response) => { + console.log(response); +});e +``` +{% endtab%} +{% endtabs%} + +#### Q: Quand utiliser les données de formulaire vs JSON comme le corps de la demande d'API? + +R: pour[Document Loaders](../integrations/langchain/document-loaders/)qui ont des fonctionnalités de téléchargement de fichiers, telles que PDF, DOCX, TXT, etc., le corps doit être envoyé sous forme de données de formulaire. 
+ +{% hint style = "avertissement"%} +Assurez-vous que le type de fichier envoyé est compatible avec le type de fichier attendu à partir du chargeur de documents. + +Par exemple, si un[PDF File Loader](../integrations/langchain/document-loaders/pdf-file.md)est utilisé, vous ne devriez envoyer que **. PDF ** Fichiers. + +Pour éviter d'avoir des chargeurs séparés pour différents types de fichiers, nous vous recommandons d'utiliser[File Loader](../integrations/langchain/document-loaders/file-loader.md) +{% EndHint%} + +{% Tabs%} +{% Tab Title = "Python API"%} +```python +import requests +import json + +API_URL = "http://localhost:3000/api/v1/document-store/upsert/" + +# use form data to upload files +form_data = { + "files": ('my-another-file.pdf', open('my-another-file.pdf', 'rb')) +} + +body_data = { + "docId": +} + +def query(form_data): + response = requests.post(API_URL, files=form_data, data=body_data) + print(response) + return response.json() + +output = query(form_data) +print(output) +``` +{% endtab%} + +{% tab title = "JavaScript api"%} +```javascript +// use FormData to upload files +let formData = new FormData(); +formData.append("files", input.files[0]); +formData.append("docId", ); + +async function query(formData) { + const response = await fetch( + "http://localhost:3000/api/v1/document-store/upsert/", + { + method: "POST", + body: formData + } + ); + const result = await response.json(); + return result; +} + +query(formData).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +Pour d'autres[Document Loaders](https://docs.flowiseai.com/integrations/langchain/document-loaders)Nœuds sans télécharger la fonctionnalité du fichier, le corps de l'API est au format ** json **: + +{% Tabs%} +{% Tab Title = "Python API"%} +```python +import requests + +API_URL = "http://localhost:3000/api/v1/document-store/upsert/" + +def query(payload): + response = requests.post(API_URL, json=payload) + return response.json() + +output = 
query({ + "docId": +}) +print(output) +``` +{% endtab%} + +{% tab title = "JavaScript api"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/document-store/upsert/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "docId": +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +#### Q: Puis-je ajouter de nouvelles métadonnées? + +R: Vous pouvez fournir de nouvelles métadonnées en passant le **`metadata`** À l'intérieur de la demande du corps: + +```json +{ + "docId": , + "metadata": { + "source: "abc" + } +} +``` + +### API de rafraîchissement + +Souvent, vous voudrez peut-être revoir tous les chargeurs de documents dans le magasin de documents pour récupérer les dernières données, et Upsert to Vector Store, pour tout garder en synchronisation. Cela peut être fait via une API de rafraîchissement: + +{% Tabs%} +{% Tab Title = "Python API"%} +```python +import requests + +API_URL = "http://localhost:3000/api/v1/document-store/refresh/" + +def query(): + response = requests.post(API_URL) + return response.json() + +output = query() +print(output) +``` +{% endtab%} + +{% tab title = "JavaScript api"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/document-store/refresh/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + } + } + ); + const result = await response.json(); + return result; +} + +query().then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +Vous pouvez également remplacer la configuration existante du chargeur de documents spécifique: + +{% Tabs%} +{% Tab Title = "Python API"%} +```python +import requests + +API_URL = "http://localhost:3000/api/v1/document-store/refresh/" + +def query(payload): + response = 
requests.post(API_URL, json=payload) + return response.json() + +output = query( +{ + "items": [ + { + "docId": , + "splitter": { + "name": "recursiveCharacterTextSplitter", + "config": { + "chunkSize": 2000, + "chunkOverlap": 100 + } + } + } + ] +} +) +print(output) +``` +{% endtab%} + +{% tab title = "JavaScript api"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/document-store/refresh/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "items": [ + { + "docId": , + "splitter": { + "name": "recursiveCharacterTextSplitter", + "config": { + "chunkSize": 2000, + "chunkOverlap": 100 + } + } + } + ] +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +## 11. Résumé + +Nous avons commencé par créer un magasin de documents pour organiser les données politiques de LibertyGuard Deluxe Homeowners. Ces données ont ensuite été préparées en téléchargeant, en se répandant, en les traitementant et en les faisant augmenter, ce qui le prépare pour notre système de chiffon. + +** Avantages du magasin de documents: ** + +Les magasins de documents offrent plusieurs avantages pour la gestion et la préparation des données pour la récupération des systèmes de génération augmentée (RAG): + +* ** Organisation et gestion: ** Ils fournissent un emplacement central pour stocker, gérer et préparer vos données. +* ** Qualité des données: ** Le processus de chasse aide à structurer les données pour une récupération et une analyse précises. +* ** Flexibilité: ** Les magasins de documents permettent d'affiner et d'ajuster les données au besoin, en améliorant la précision et la pertinence de votre système de chiffon. + +## 12. 
Tutoriels vidéo + +### RAG Like a Boss - Flowise Document Store Tutorial + +Dans cette vidéo, [Leon](https://youtube.com/@leonvanzyl) fournit un tutoriel étape par étape sur l'utilisation des magasins de documents pour gérer facilement vos bases de connaissances RAG dans FlowiseAI. + +{% embed url = "https://youtu.be/plusfakohoa"%} diff --git a/fr/using-flowise/embed.md b/fr/using-flowise/embed.md new file mode 100644 index 00000000..984c67e3 --- /dev/null +++ b/fr/using-flowise/embed.md @@ -0,0 +1,304 @@ +--- +description: Learn how to customize and embed our chat widget +--- + +# Intégrer + +*** + +Vous pouvez facilement ajouter le widget de chat à votre site Web. Copiez simplement le script de widget fourni et collez-le entre les balises `<body>` et `</body>` de votre fichier HTML. + +
    + +## Configuration du widget + +La vidéo suivante montre comment injecter le script de widget dans n'importe quelle page Web. + +{% embed url = "https://github.com/flowiseai/flowise/assets/26460777/c128829a-2d08-4d60-b821-1e41a9e677d0"%} + +## En utilisant une version spécifique + +Vous pouvez spécifier la version de Flowise-Embed's`web.js`à utiliser. Pour la liste complète des versions:[https://www.npmjs.com/package/flowise-embed](https://www.npmjs.com/package/flowise-embed) + +```html + +``` + +{% hint style = "avertissement"%} +Dans Flowise ** v2.1.0 **, nous avons modifié le fonctionnement du streaming. Si votre version Flowise est inférieure à cela, vous pouvez trouver votre chatbot intégré à recevoir des messages. + +Vous pouvez soit mettre à jour le flux vers ** v2.1.0 ** et au-dessus + +Ou, si pour une raison quelconque, vous préférez ne pas mettre à jour Flowise, vous pouvez spécifier la dernière version ** v1.x.x ** de[Flowise-Embed](https://www.npmjs.com/package/flowise-embed?activeTab=versions). Dernière maintenue`web.js`La version est ** v1.3.14. ** + +Par exemple: + +`https://cdn.jsdelivr.net/npm/flowise-embed@1.3.14/dist/web.js` +{% EndHint%} + +## ConfigFlow Config + +Tu peux passer`chatflowConfig`Objet JSON pour remplacer la configuration existante. C'est la même chose que[Broken link](broken-reference "mention")en API. + +```html + +``` + +## Configuration d'observateur + +Cela vous permet d'exécuter du code dans le parent en fonction des observations de signal dans le chatbot. + +```html + +``` + +## Thème + +Vous pouvez modifier l'apparence complète du chatbot intégré et activer les fonctionnalités telles que les infractions, les avertissements, les messages de bienvenue personnalisés, et plus en utilisant la propriété du thème. Cela vous permet de personnaliser profondément l'apparence du widget, notamment: + +* ** Bouton: ** Position, taille, couleur, icône, comportement de glisser-déposer et d'ouverture automatique. 
+* ** Infiltration: ** Visibilité, texte du message, couleur d'arrière-plan, couleur du texte et taille de police. +* ** Avis de non-responsabilité: ** Titre, message, couleurs pour le texte, les boutons et l'arrière-plan, y compris une option de superposition floue. +* ** Fenêtre de chat: ** Titre, agent / affichage des messages utilisateur, messages de bienvenue / d'erreur, couleur / image d'arrière-plan, dimensions, taille de police, invites de démarrage, rendu HTML, style de message (couleurs, avatars), comportement d'entrée de texte (Couleurs, limites de caractère, sons), options de rétroaction, affichage de date / temps et personnalisation de la page. +* ** CSSS personnalisé: ** Injectez directement le code CSS pour un contrôle encore plus fin sur l'apparence, remplacement des styles par défaut au besoin ([see the instructions guide below](embed.md#custom-css-modification)) + +```html + +``` + +** Remarque: ** Voir plein[configuration list](https://github.com/FlowiseAI/FlowiseChatEmbed#configuration) + +## Modification du code personnalisé + +Pour modifier le code source complet du widget de chat embarqué, suivez ces étapes: + +1. Fourchez le[Flowise Chat Embed](https://github.com/FlowiseAI/FlowiseChatEmbed)dépôt +2. Courir`yarn install`Pour installer les dépendances nécessaires +3. Ensuite, vous pouvez apporter des modifications au code +4. Courir`yarn build`Pour récupérer les changements +5. Poussez les modifications au référentiel fourchu +6. Vous pouvez ensuite utiliser votre personnalité`web.js`comme un chat intégré comme ça: + +Remplacer`username`à votre nom d'utilisateur GitHub, et`forked-repo`à votre dépôt fourchu. + +
       
    +```
    +
    +{% hint style="info" %}
    +Une alternative à jsDelivr est unpkg. Voici un exemple:
    +
    +
       https://unpkg.com/flowise-embed/dist/web.js
    +  
    +{% EndHint%} + +## Modification CSS personnalisée + +Vous pouvez désormais ajouter directement des CSS personnalisés pour styliser votre widget de chat intégré, éliminant le besoin de personnalité`web.js`Fichiers (nécessite v2.0.8 ou version ultérieure). Cela vous permet de: + +* Donnez à chaque chatbot intégré un aspect et une sensation uniques +* Utilisez le fonctionnaire`web.js`—Les versions ou hébergeurs plus personnalisés ne sont nécessaires pour le style +* Mettre à jour les styles instantanément + +Voici comment l'utiliser: + +```html + + +``` + +## Cors + +Lorsque vous utilisez un widget de chat intégré, il y a une chance que vous puissiez faire face à un problème COR comme: + +{% hint style = "danger"%} +L'accès à la récupération à 'https: // \ / api / v1 / prédiction /' From Origin 'https: // \ ' n'a été bloqué par la politique CORS: pas de «Access-Control-Allow-origin» Header n'est présent sur la ressource demandée. +{% EndHint%} + +Pour le réparer, spécifiez les variables d'environnement suivantes: + +``` +CORS_ORIGINS=* +IFRAME_ORIGINS=* +``` + +Par exemple, si vous utilisez`npx flowise start` + +``` +npx flowise start --CORS_ORIGINS=* --IFRAME_ORIGINS=* +``` + +Si vous utilisez Docker, placez les variables Env à l'intérieur`Flowise/docker/.env` + +Si vous utilisez un clone Git local, placez les variables Env à l'intérieur`Flowise/packages/server/.env` + +## Tutoriels vidéo + +Ces deux vidéos vous apprendront à intégrer le widget Flowise dans un site Web. 
+ +{% embed url = "https://youtu.be/4paq2wobdq4"%} + +{% embed url = "https://youtu.be/xoecv1xyn48"%} diff --git a/fr/using-flowise/evaluations.md b/fr/using-flowise/evaluations.md new file mode 100644 index 00000000..30220f57 --- /dev/null +++ b/fr/using-flowise/evaluations.md @@ -0,0 +1,104 @@ +# Évaluations + +{% hint style = "info"%} +Les évaluations ne sont disponibles que pour le cloud et le plan d'entreprise +{% EndHint%} + +Les évaluations vous aident à surveiller et à comprendre les performances de votre application ChatFlow / AgentFlow. Au niveau élevé, une évaluation est un processus qui prend un ensemble d'entrées et de sorties correspondantes de votre ChatFlow / AgentFlow, et génère des scores. Ces scores peuvent être dérivés en comparant les sorties aux résultats de référence, tels que par correspondance de cordes, une comparaison numérique ou même en tirant parti d'un LLM en tant que juge. Ces évaluations sont effectuées à l'aide de ensembles de données et d'évaluateurs. + +## Ensembles de données + +Les ensembles de données sont les entrées qui seront utilisées pour exécuter votre ChatFlow / AgentFlow, ainsi que les sorties correspondantes pour la comparaison. L'utilisateur peut ajouter manuellement l'entrée et la sortie prévue ou télécharger un fichier CSV avec 2 colonnes: entrée et sortie. + +
    + +| Entrée | Sortie | +| --------------------------------- | ---------------------------- | +| Quelle est la capitale du Royaume-Uni | La capitale du Royaume-Uni est Londres | +| Combien de jours y a-t-il dans un an | Il y a 365 jours par an | + +## Évaluateurs + +Les évaluateurs sont comme des tests unitaires. Au cours d'une évaluation, les entrées des ensembles de données sont exécutées sur les flux sélectionnés et les sorties sont évaluées à l'aide d'évaluateurs sélectionnés. Il existe 3 types d'évaluateurs: + +* **Basé sur le texte**: vérification basée sur les chaînes: + * Contient au moins un (Contains Any) + * Contient tout (Contains All) + * Ne contient aucun + * Ne contient pas tout + * Commence par + * Ne commence pas par + +
    + +* **Basé sur les nombres:** vérification de valeurs numériques: + * Jetons totaux + * Jetons de prompt + * Jetons d'achèvement + * Latence API + * Latence LLM + * Latence de chatflow + * Latence d'agentflow (à venir) + * Longueur en caractères de la sortie + +
    + +* ** LLM basé sur LLM **: Utilisation d'un autre LLM pour noter la sortie + * Hallucination + * Correction + +
    + +## Évaluations + +Maintenant que nous avons préparé des ensembles de données et des évaluateurs, nous pouvons commencer à exécuter une évaluation. + +1.) Sélectionnez DataSet et ChatFlow pour évaluer. Vous pouvez sélectionner plusieurs ensembles de données et chatflows. En utilisant l'exemple ci-dessous, toutes les entrées de DataSet1 seront exécutées contre 2 ChatFlows. Étant donné que DataSet1 a 2 entrées, un total de 4 sorties seront produites et évaluées. + +
    + +2.) Sélectionnez les évaluateurs. Seuls des évaluateurs basés sur des chaînes et basés sur des chaînes sont disponibles pour être sélectionnés à ce stade. + +
    + +3.) (Facultatif) Sélectionnez l'évaluateur basé sur LLM. Démarrer l'évaluation: + +
    + +4.) Attendez que l'évaluation soit terminée: + +
    + +5.) Une fois l'évaluation terminée, cliquez sur l'icône du graphique sur le côté droit pour afficher les détails: + +
    + +Les 3 graphiques ci-dessus montrent le résumé de l'évaluation: + +* Tarif de réussite / échouer +* Invite moyen et jetons d'achèvement utilisés +* Latence moyenne de la demande + +Le tableau sous les graphiques montre les détails de chaque exécution. + +
    + +
    + +### Réévaluer une réévaluation + +Lorsque les flux utilisés sur l'évaluation ont été mis à jour / modifiés, un message d'avertissement sera affiché: + +
    + +Vous pouvez réexaminer la même évaluation en utilisant le bouton d'évaluation à nouveau dans le coin supérieur droit. Vous pourrez voir les différentes versions: + +
    + +Vous pouvez également afficher et comparer les résultats de différentes versions: + +
    + +## Tutoriel vidéo + +{% embed url = "https://youtu.be/kgutthmkgfg?si=3rlplep_0ti0p6uv&t=486"%} diff --git a/fr/using-flowise/monitoring.md b/fr/using-flowise/monitoring.md new file mode 100644 index 00000000..2ff9b771 --- /dev/null +++ b/fr/using-flowise/monitoring.md @@ -0,0 +1,97 @@ +# Surveillance + +Flowise a un support natif pour Prometheus avec Grafana et Opentelémétrie. Cependant, seules des mesures de haut niveau telles que les demandes d'API, les dénombrements de flux / prédictions sont suivis. Référer[here](https://github.com/FlowiseAI/Flowise/blob/main/packages/server/src/Interface.Metrics.ts#L13)Pour les listes de contre-métriques. Pour plus de détails, l'observabilité du nœud par nœud, nous vous recommandons d'utiliser[Analytic](broken-reference). + +## Prométhée + +[Prometheus](https://prometheus.io/)est une solution de surveillance et d'alerte open source. + +Avant de configurer Prometheus, configurez les variables Env suivantes dans Flowise: + +```properties +ENABLE_METRICS=true +METRICS_PROVIDER=prometheus +METRICS_INCLUDE_NODE_METRICS=true +``` + +Une fois ProTheus installé, exécutez-le à l'aide d'un fichier de configuration. Flowise fournit un fichier de configuration par défaut qui peut être trouvé[here](https://github.com/FlowiseAI/Flowise/blob/main/metrics/prometheus/prometheus.config.yml). + +N'oubliez pas d'avoir une instance fluide également en cours d'exécution. Vous pouvez ouvrir le navigateur et naviguer vers le port 9090. Dans le tableau de bord, vous devriez pouvoir voir le point de terminaison métrique -`/api/v1/metrics`est maintenant en direct. + +
    + +Par défaut,`/api/v1/metrics`est disponible pour Prometheus pour tirer les mesures. + +
    + +## Grafana + +Prométhée recueille des mesures riches et fournit un langage de requête puissant; Grafana transforme les mesures en visualisations significatives. + +Grafana peut être installé de diverses manières. Reportez-vous au[guide](https://grafana.com/docs/grafana/latest/setup-grafana/installation/). + +Grafana expose par défaut le port 9091: + +
    + +Sur la barre gauche, cliquez sur Ajouter une nouvelle connexion et sélectionnez Prometheus: + +
    + +Puisque notre Prometheus sert au port 9090: + +
    + +Faites défiler vers le bas et testez la connexion: + +
    + +Prenez note de l'ID de source de données indiqué dans la barre d'outils, nous en aurons besoin pour créer des tableaux de bord: + +
    + +Maintenant que la connexion est ajoutée avec succès, nous pouvons commencer à ajouter un tableau de bord. Dans la barre gauche, cliquez sur Tableaux de bord et créez un tableau de bord. + +Flowise fournit 2 tableaux de bord de modèle: + +* [grafana.dashboard.app.json.txt](https://github.com/FlowiseAI/Flowise/blob/main/metrics/grafana/grafana.dashboard.app.json.txt): Les mesures API telles que le nombre de ChatFlows / Agentflows, le nombre de prédictions, les outils, l'assistant, les vecteurs renversés, etc. +* [grafana.dashboard.server.json.txt](https://github.com/FlowiseAI/Flowise/blob/main/metrics/grafana/grafana.dashboard.server.json.txt): métriques de l'instance Node.js flowise telle que le tas, le processeur, l'utilisation de la RAM + +Si vous utilisez des modèles ci-dessus, trouvez et remplacez toutes`cds4j1ybfuhogb`avec l'ID de source de données que vous avez créé et enregistré plus tôt. + +
    + +Vous pouvez également choisir d'importer d'abord, puis modifier le JSON plus tard: + +
    + +Maintenant, essayez d'effectuer des actions sur le flux, vous devriez pouvoir voir les métriques affichées: + +
    + +
    + +## OpenTelemetry + +[OpenTelemetry](https://opentelemetry.io/)est un cadre open source pour créer et gérer les données de télémétrie. Pour activer Otel, configurez les variables Env suivantes dans Flowise: + +```properties +ENABLE_METRICS=true +METRICS_PROVIDER=open_telemetry +METRICS_INCLUDE_NODE_METRICS=true +METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT=http://localhost:4318/v1/metrics +METRICS_OPEN_TELEMETRY_PROTOCOL=http # http | grpc | proto (default is http) +METRICS_OPEN_TELEMETRY_DEBUG=true +``` + +Ensuite, nous avons besoin d'un collecteur d'OpenTelemetry pour recevoir, traiter et exporter des données de télémétrie. Flowise fournit un[docker compose file](https://github.com/FlowiseAI/Flowise/blob/main/metrics/otel/compose.yaml)qui peut être utilisé pour démarrer le conteneur collecteur. + +```bash +cd Flowise +cd metrics && cd otel +docker compose up -d +``` + +Le collectionneur utilisera le[otel.config.yml](https://github.com/FlowiseAI/Flowise/blob/main/metrics/otel/otel.config.yml)Fichier sous le même répertoire pour les configurations. Actuellement seulement[Datadog](https://www.datadoghq.com/)et prometheus sont soutenus, référer à[Open Telemetry](https://opentelemetry.io/)Documentation pour configurer différents outils APM tels que Zipkin, Jeaver, New Relic, Splunk et autres. + +Assurez-vous de remplacer par la touche API nécessaire pour les exportateurs dans le fichier YML. diff --git a/fr/using-flowise/prediction.md b/fr/using-flowise/prediction.md new file mode 100644 index 00000000..651b30f4 --- /dev/null +++ b/fr/using-flowise/prediction.md @@ -0,0 +1,1793 @@ +# Prédiction + +L'API de prédiction est le principal critère d'évaluation pour interagir avec vos flux et assistants fluide. Il vous permet d'envoyer des messages à votre flux sélectionné et de recevoir des réponses. 
Cette API gère la fonctionnalité de chat de base, notamment: + +* ** Interactions de chat **: Envoyez des questions ou des messages à votre flux et recevez des réponses générées par l'AI-AI +* ** Réponses en streaming **: Obtenez des réponses en streaming en temps réel pour une meilleure expérience utilisateur +* ** Mémoire de conversation **: Maintenir le contexte sur plusieurs messages d'une session +* ** Traitement de fichiers **: Télécharger et traiter les images, l'audio et autres fichiers dans le cadre de vos requêtes +* ** Configuration dynamique **: remplacer les paramètres de chat et les variables de passes à l'exécution + +Pour plus de détails, voir le[Prediction Endpoint API Reference](../api-reference/prediction.md). + +## URL de base et authentification + +** URL de base **:`http://localhost:3000`(ou votre URL d'instance fluide) + +** Point de terminaison **:`POST /api/v1/prediction/:id` + +** Authentification **: se référer[Authentication for Flows](../configuration/authorization/chatflow-level.md) + +## Demander le format + +#### Structure de base de la demande + +```json +{ + "question": "Your message here", + "streaming": false, + "overrideConfig": {}, + "history": [], + "uploads": [], + "form": {} +} +``` + +#### Paramètres + +| Paramètre | Type | Requis | Description | +| ---------------- | ------- | --------------------------- | ------------------------------------------- | +| `question`| String | Oui | Le message / question à envoyer au flux | +| `form`| Objet | Soit`question`ou`form`| L'objet formulaire à envoyer à l'écoulement | +| `streaming`| booléen | Non | Activer les réponses en streaming (par défaut: false) | +| `overrideConfig`| Objet | Non | Remplacez la configuration du flux | +| `history`| tableau | Non | Messages de conversation précédents | +| `uploads`| tableau | Non | Fichiers à télécharger (images, audio, etc.) 
| +| `humanInput`| Objet | Non | Renvoie les commentaires humains et le curriculum vitae | + +## Bibliothèques SDK + +Flowise fournit des SDK officiels pour Python et TypeScript / JavaScript: + +#### Installation + +**Python**:`pip install flowise` + +** TypeScript / JavaScript **:`npm install flowise-sdk` + +#### Utilisation du SDK Python + +{% Tabs%} +{% tab title = "USAGE BASIC"%} +```python +from flowise import Flowise, PredictionData + +# Initialize client +client = Flowise(base_url="http://localhost:3000") + +# Non-streaming prediction +try: + response = client.create_prediction( + PredictionData( + chatflowId="your-chatflow-id", + question="What is machine learning?", + streaming=False + ) + ) + + # Handle response + for result in response: + print("Response:", result) + +except Exception as e: + print(f"Error: {e}") +``` +{% endtab%} + +{% tab title = "streaming"%} +```python +from flowise import Flowise, PredictionData + +client = Flowise(base_url="http://localhost:3000") + +# Streaming prediction +try: + response = client.create_prediction( + PredictionData( + chatflowId="your-chatflow-id", + question="Tell me a long story about AI", + streaming=True + ) + ) + + # Process streaming chunks + print("Streaming response:") + for chunk in response: + print(chunk, end="", flush=True) + +except Exception as e: + print(f"Error: {e}") +``` +{% endtab%} + +{% tab title = "avec configuration"%} +```python +from flowise import Flowise, PredictionData + +client = Flowise(base_url="http://localhost:3000") + +# Advanced configuration +try: + response = client.create_prediction( + PredictionData( + chatflowId="your-chatflow-id", + question="Analyze this data", + streaming=False, + overrideConfig={ + "sessionId": "user-session-123", + "temperature": 0.7, + "maxTokens": 500, + "returnSourceDocuments": True + } + ) + ) + + for result in response: + print("Response:", result) + +except Exception as e: + print(f"Error: {e}") +``` +{% endtab%} +{% endtabs%} + +#### Utilisation 
du SDK TypeScript / JavaScript

{% tabs %}
{% tab title="Usage de base" %}
```typescript
import { FlowiseClient } from 'flowise-sdk';

// Initialize client
const client = new FlowiseClient({
  baseUrl: 'http://localhost:3000'
});

// Non-streaming prediction
async function chatWithFlow() {
  try {
    const response = await client.createPrediction({
      chatflowId: 'your-chatflow-id',
      question: 'What is machine learning?',
      streaming: false
    });

    console.log('Response:', response);

  } catch (error) {
    console.error('Error:', error);
  }
}

chatWithFlow();
```
{% endtab %}

{% tab title="Streaming" %}
```typescript
import { FlowiseClient } from 'flowise-sdk';

const client = new FlowiseClient({
  baseUrl: 'http://localhost:3000'
});

// Streaming prediction
async function streamingChat() {
  try {
    const stream = await client.createPrediction({
      chatflowId: 'your-chatflow-id',
      question: 'Tell me a long story about AI',
      streaming: true
    });

    console.log('Streaming response:');
    for await (const chunk of stream) {
      process.stdout.write(chunk);
    }

  } catch (error) {
    console.error('Error:', error);
  }
}

streamingChat();
```
{% endtab %}

{% tab title="Avec configuration" %}
```typescript
import { FlowiseClient } from 'flowise-sdk';

const client = new FlowiseClient({
  baseUrl: 'http://localhost:3000'
});

// Advanced configuration
async function advancedChat() {
  try {
    const response = await client.createPrediction({
      chatflowId: 'your-chatflow-id',
      question: 'Analyze this data',
      streaming: false,
      overrideConfig: {
        sessionId: 'user-session-123',
        temperature: 0.7,
        maxTokens: 500,
        returnSourceDocuments: true
      }
    });

    console.log('Response:', response);

  } catch (error) {
    console.error('Error:', error);
  }
}

advancedChat();
```
{% endtab %}
{% endtabs %}

## Utilisation directe de l'API HTTP

Si vous préférez utiliser l'API REST directement sans SDK :

#### Demande
de base

{% tabs %}
{% tab title="Python (requests)" %}
```python
import requests
import json

def send_message(chatflow_id, question, streaming=False):
    url = f"http://localhost:3000/api/v1/prediction/{chatflow_id}"

    payload = {
        "question": question,
        "streaming": streaming
    }

    headers = {
        "Content-Type": "application/json"
    }

    try:
        response = requests.post(url, json=payload, headers=headers)
        response.raise_for_status()  # Raise exception for bad status codes

        return response.json()

    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        return None

# Usage
result = send_message(
    chatflow_id="your-chatflow-id",
    question="What is artificial intelligence?",
    streaming=False
)

if result:
    print("Response:", result)
```
{% endtab %}

{% tab title="JavaScript (fetch)" %}
```javascript
async function sendMessage(chatflowId, question, streaming = false) {
    const url = `http://localhost:3000/api/v1/prediction/${chatflowId}`;

    const payload = {
        question: question,
        streaming: streaming
    };

    try {
        const response = await fetch(url, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(payload)
        });

        if (!response.ok) {
            throw new Error(`HTTP error! status: ${response.status}`);
        }

        const result = await response.json();
        return result;

    } catch (error) {
        console.error('Request failed:', error);
        return null;
    }
}

// Usage
sendMessage(
    'your-chatflow-id',
    'What is artificial intelligence?',
    false
).then(result => {
    if (result) {
        console.log('Response:', result);
    }
});
```
{% endtab %}

{% tab title="cURL" %}
```bash
curl -X POST "http://localhost:3000/api/v1/prediction/your-chatflow-id" \
  -H "Content-Type: application/json" \
  -d '{
    "question": "What is artificial intelligence?",
    "streaming": false
  }'
```
{% endtab %}
{% endtabs %}

## Fonctionnalités avancées

### Entrée de formulaire

Dans AgentFlow v2, vous pouvez sélectionner `form` comme type d'entrée.

    + +Vous pouvez remplacer la valeur par nom de variable de l'entrée de formulaire + +```json +{ + "form": { + "title": "Example", + "count": 1, + ... + } +} +``` + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +def prediction(flow_id, form): + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + payload = { + "form": form + } + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +result = prediction( + flow_id="your-flow-id", + form={ + "title": "ABC", + "choices": "A" + } +) + +print(result) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function prediction(flowId, form) { + const url = `http://localhost:3000/api/v1/prediction/${flowId}`; + + const payload = { + form: form + }; + + try { + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +prediction( + 'your-flow-id', + { + "title": "ABC", + "choices": "A" + } +).then(result => { + console.log(result); +}); +``` +{% endtab%} +{% endtabs%} + +### Remplacement de la configuration + +Remplacez dynamiquement les paramètres de ChatFlow. + +La configuration de remplacement est ** désactivée ** Par défaut pour des raisons de sécurité. Activez-le en haut à droite: ** Paramètres ** → ** Configuration ** → ** Sécurité ** Tab: + +
    + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +def query_with_config(flow_id, question, config): + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + payload = { + "question": question, + "overrideConfig": config + } + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example: Override session and return source documents +result = query_with_config( + flow_id="your-flow-id", + question="How does machine learning work?", + config={ + "sessionId": "user-123", + "temperature": 0.5, + "maxTokens": 1000 + } +) + +print(result) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function queryWithConfig(flowId, question, config) { + const url = `http://localhost:3000/api/v1/prediction/${flowId}`; + + const payload = { + question: question, + overrideConfig: config + }; + + try { + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +// Example: Override session and return source documents +queryWithConfig( + 'your-flow-id', + 'How does machine learning work?', + { + sessionId: 'user-123', + temperature: 0.5, + maxTokens: 1000 + } +).then(result => { + console.log(result); +}); +``` +{% endtab%} +{% endtabs%} + +Pour`array`Type, survol de l'icône Info affichera le schéma qui peut être remplacé. + +La valeur du tableau de OverRideConfig se concatera avec les valeurs de tableau existantes. Par exemple, si existant`startState`a: + +```json +{ + "key": "key1", + "value": "value1" +} +``` + +Et si nous permettons de remplacer: + +
    + +```json +"overrideConfig": { + "startState": [ + { + "key": "foo", + "value": "bar" + } + ], + "llmMessages": [ + { + "role": "system", + "content": "You are helpful assistant" + } + ] +} +``` + +La finale`startState`sera: + +```json +[ + { + "key": "key1", + "value": "value1" + }, + { + "key": "foo", + "value": "bar" + }, +] +``` + +### Overrifier le nœud spécifique + +Par défaut, si plusieurs nœuds partagent le même type et qu'aucun ID de nœud n'est spécifié, le remplacement d'une propriété mettra à jour cette propriété sur tous les nœuds correspondants. + +Par exemple, il y a 2 nœuds LLM où je souhaite remplacer le message système: + +
    + +Après avoir permis la possibilité de remplacer: + +
    + +Je peux remplacer le message système pour les deux LLM comme ainsi: + +```json +"overrideConfig": { + "llmMessages": [ + { + "role": "system", + "content": "You are sarcastic" + } + ] +} +``` + +Depuis l'exécution, vous pouvez voir le message du système Overriden: + +
    + +
    + +Dans certains cas, vous voudrez peut-être simplement remplacer la configuration pour un nœud spécifique. Vous pouvez le faire en spécifiant l'ID de nœud ** à l'intérieur ** la propriété que vous souhaitez remplacer. + +Par exemple: + +```json +"overrideConfig": { + "llmMessages": { + "llmAgentflow_0": [ + { + "role": "system", + "content": "You are sweet" + } + ], + "llmAgentflow_1": [ + { + "role": "system", + "content": "You are smart" + } + ] + } +} +``` + +Si vous retournez à l'exécution, vous pouvez voir que chaque LLM a la valeur de dépassement correcte: + +
    + +
    + +### Histoire de la conversation + +Fournir un contexte de conversation en incluant des messages précédents dans le tableau d'historique. + +** Format de message d'histoire ** + +```json +{ + "role": "apiMessage" | "userMessage", + "content": "Message content" +} +``` + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +def chat_with_history(flow_id, question, history): + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + payload = { + "question": question, + "history": history + } + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example conversation with context +conversation_history = [ + { + "role": "apiMessage", + "content": "Hello! I'm an AI assistant. How can I help you today?" + }, + { + "role": "userMessage", + "content": "Hi, my name is Sarah and I'm learning about AI" + }, + { + "role": "apiMessage", + "content": "Nice to meet you, Sarah! I'd be happy to help you learn about AI. What specific aspects interest you?" + } +] + +result = chat_with_history( + flow_id="your-flow-id", + question="Can you explain neural networks in simple terms?", + history=conversation_history +) + +print(result) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function chatWithHistory(flowId, question, history) { + const url = `http://localhost:3000/api/v1/prediction/${flowId}`; + + const payload = { + question: question, + history: history + }; + + try { + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +// Example conversation with context +const conversationHistory = [ + { + role: "apiMessage", + content: "Hello! I'm an AI assistant. How can I help you today?" + }, + { + role: "userMessage", + content: "Hi, my name is Sarah and I'm learning about AI" + }, + { + role: "apiMessage", + content: "Nice to meet you, Sarah! I'd be happy to help you learn about AI. What specific aspects interest you?" + } +]; + +chatWithHistory( + 'your-flow-id', + 'Can you explain neural networks in simple terms?', + conversationHistory +).then(result => { + console.log(result); +}); +``` +{% endtab%} +{% endtabs%} + +### Gestion de session + +Utiliser`sessionId`Pour maintenir l'état de conversation sur plusieurs appels d'API. Chaque session maintient son propre contexte de conversation et sa mémoire. + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +class FlowiseSession: + def __init__(self, flow_id, session_id, base_url="http://localhost:3000"): + self.flow_id= flow_id + self.session_id = session_id + self.base_url = base_url + self.url = f"{base_url}/api/v1/prediction/{flow_id}" + + def send_message(self, question, **kwargs): + payload = { + "question": question, + "overrideConfig": { + "sessionId": self.session_id, + **kwargs.get("overrideConfig", {}) + } + } + + # Add any additional parameters + for key, value in kwargs.items(): + if key != "overrideConfig": + payload[key] = value + + try: + response = requests.post(self.url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Usage +session = FlowiseSession( + flow_id="your-flow-id", + session_id="user-session-123" +) + +# First message +response1 = session.send_message("Hello, my name is John") +print("Response 1:", response1) + +# Second message - will 
remember the previous context +response2 = session.send_message("What's my name?") +print("Response 2:", response2) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +class FlowiseSession { + constructor(flowId, sessionId, baseUrl = 'http://localhost:3000') { + this.flowId= flowId; + this.sessionId = sessionId; + this.baseUrl = baseUrl; + this.url = `${baseUrl}/api/v1/prediction/${flowId}`; + } + + async sendMessage(question) { + const payload = { + question: question, + overrideConfig: { + sessionId: this.sessionId + } + }; + + try { + const response = await fetch(this.url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } + } +} + +// Usage +const session = new FlowiseSession( + 'your-flow-id', + 'user-session-123' +); + +async function conversationExample() { + // First message + const response1 = await session.sendMessage("Hello, my name is John"); + console.log("Response 1:", response1); + + // Second message - will remember the previous context + const response2 = await session.sendMessage("What's my name?"); + console.log("Response 2:", response2); +} + +conversationExample(); +``` +{% endtab%} +{% endtabs%} + +### Variables + +Passer des variables dynamiques à votre flux en utilisant le`vars`propriété`overrideConfig`. Les variables peuvent être utilisées dans votre flux pour injecter un contenu dynamique. + +{% hint style = "avertissement"%} +Les variables doivent être créées avant de pouvoir la remplacer. 
Se référer à[Variables](variables.md) +{% EndHint%} + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +def send_with_variables(flow_id, question, variables): + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + payload = { + "question": question, + "overrideConfig": { + "vars": variables + } + } + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example: Pass user information and preferences +result = send_with_variables( + flow_id="your-flow-id", + question="Create a personalized workout plan", + variables={ + "user_name": "Alice", + "fitness_level": "intermediate", + "preferred_duration": "30 minutes", + "equipment": "dumbbells, resistance bands", + "goals": "strength training, flexibility" + } +) + +print(result) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function sendWithVariables(flowId, question, variables) { + const url = `http://localhost:3000/api/v1/prediction/${flowId}`; + + const payload = { + question: question, + overrideConfig: { + vars: variables + } + }; + + try { + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +// Example: Pass user information and preferences +sendWithVariables( + 'your-flow-id', + 'Create a personalized workout plan', + { + user_name: 'Alice', + fitness_level: 'intermediate', + preferred_duration: '30 minutes', + equipment: 'dumbbells, resistance bands', + goals: 'strength training, flexibility' + } +).then(result => { + console.log(result); +}); +``` +{% endtab%} +{% endtabs%} + +### Téléchargements d'images + +Téléchargez des images pour une analyse visuelle lorsque votre flux prend en charge le traitement d'image. Se référer à[Image](uploads.md#image)Pour plus de référence. + +** Structure de téléchargement: ** + +```json +{ + "data": "", + "type": "", + "name": ", + "mime": " +} +``` + +** Données: ** base64 ou URL d'une image + +**Taper**:`url`ou`file` + +** Nom: ** Nom de l'image + +**Mime**:`image/png`, `image/jpeg`, `image/jpg` + +{% Tabs%} +{% Tab Title = "Python (base64)"%} +```python +import requests +import base64 +import os + +def upload_image(flow_id, question, image_path): + # Read and encode image + with open(image_path, 'rb') as image_file: + encoded_image = base64.b64encode(image_file.read()).decode('utf-8') + + # Determine MIME type based on file extension + mime_types = { + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.webp': 'image/webp' + } + + file_ext = os.path.splitext(image_path)[1].lower() + mime_type = mime_types.get(file_ext, 'image/png') + + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + payload = { + "question": question, + "uploads": [ + { + "data": f"data:{mime_type};base64,{encoded_image}", + "type": "file", + "name": os.path.basename(image_path), + "mime": mime_type + } + ] + } + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except 
requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example usage +result = upload_image( + flow_id="your-flow-id", + question="Can you describe what you see in this image?", + image_path="path/to/your/image.png" +) + +print(result) +``` +{% endtab%} + +{% Tab Title = "Python (URL)"%} +```python +import requests +import os + +def upload_image_url(flow_id, question, image_url, image_name=None): + """ + Upload an image using a URL instead of base64 encoding. + This is more efficient for images that are already hosted online. + """ + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + # Extract filename from URL if not provided + if not image_name: + image_name = image_url.split('/')[-1] + if '?' in image_name: + image_name = image_name.split('?')[0] + + # Determine MIME type from URL extension + mime_types = { + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.webp': 'image/webp' + } + + file_ext = os.path.splitext(image_name)[1].lower() + mime_type = mime_types.get(file_ext, 'image/jpeg') + + payload = { + "question": question, + "uploads": [ + { + "data": image_url, + "type": "url", + "name": image_name, + "mime": mime_type + } + ] + } + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example usage with public image URL +result = upload_image_url( + flow_id="your-flow-id", + question="What's in this image? 
Analyze it in detail.", + image_url="https://example.com/path/to/image.jpg", + image_name="example_image.jpg" +) + +print(result) + +# Example with direct URL (no custom name) +result2 = upload_image_url( + chatflow_id="your-chatflow-id", + question="Describe this screenshot", + image_url="https://i.imgur.com/sample.png" +) + +print(result2) +``` +{% endtab%} + +{% tab title = "JavaScript (Fichier Téléchargement)"%} +```javascript +async function uploadImage(flowId, question, imageFile) { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + + reader.onload = async function(e) { + const base64Data = e.target.result; + + const payload = { + question: question, + uploads: [ + { + data: base64Data, + type: 'file', + name: imageFile.name, + mime: imageFile.type + } + ] + }; + + try { + const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const result = await response.json(); + resolve(result); + + } catch (error) { + reject(error); + } + }; + + reader.onerror = function() { + reject(new Error('Failed to read file')); + }; + + reader.readAsDataURL(imageFile); + }); +} + +// Example usage in browser +document.getElementById('imageInput').addEventListener('change', async function(e) { + const file = e.target.files[0]; + if (file) { + try { + const result = await uploadImage( + 'your-flow-id', + 'Can you describe what you see in this image?', + file + ); + console.log('Analysis result:', result); + } catch (error) { + console.error('Upload failed:', error); + } + } +}); +``` +{% endtab%} + +{% tab title = "javascript (url)"%} +```javascript +async function uploadImageUrl(flowId, question, imageUrl, imageName = null) { + /** + * Upload an image using a URL instead of base64 encoding. 
+ * This is more efficient for images that are already hosted online. + */ + + // Extract filename from URL if not provided + if (!imageName) { + imageName = imageUrl.split('/').pop(); + if (imageName.includes('?')) { + imageName = imageName.split('?')[0]; + } + } + + // Determine MIME type from URL extension + const mimeTypes = { + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.webp': 'image/webp' + }; + + const fileExt = imageName.toLowerCase().substring(imageName.lastIndexOf('.')); + const mimeType = mimeTypes[fileExt] || 'image/jpeg'; + + const payload = { + question: question, + uploads: [ + { + data: imageUrl, + type: 'url', + name: imageName, + mime: mimeType + } + ] + }; + + try { + const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +// Example usage with public image URL +async function analyzeImageFromUrl() { + try { + const result = await uploadImageUrl( + 'your-flow-id', + 'What is in this image? 
Analyze it in detail.', + 'https://example.com/path/to/image.jpg', + 'example_image.jpg' + ); + + console.log('Analysis result:', result); + } catch (error) { + console.error('Upload failed:', error); + } +} + +// Example with direct URL (no custom name) +uploadImageUrl( + 'your-flow-id', + 'Describe this screenshot', + 'https://i.imgur.com/sample.png' +).then(result => { + if (result) { + console.log('Analysis result:', result); + } +}); + +// Example with multiple image URLs +async function analyzeMultipleImages() { + const imageUrls = [ + 'https://example.com/image1.jpg', + 'https://example.com/image2.png', + 'https://example.com/image3.gif' + ]; + + const results = await Promise.all( + imageUrls.map(url => + uploadImageUrl( + 'your-flow-id', + `Analyze this image: ${url}`, + url + ) + ) + ); + + results.forEach((result, index) => { + console.log(`Image ${index + 1} analysis:`, result); + }); +} +``` +{% endtab%} + +{% tab title = "javascript (node.js)"%} +```javascript +const fs = require('fs'); +const path = require('path'); + +async function uploadImage(flowId, question, imagePath) { + // Read image file + const imageBuffer = fs.readFileSync(imagePath); + const base64Image = imageBuffer.toString('base64'); + + // Determine MIME type + const ext = path.extname(imagePath).toLowerCase(); + const mimeTypes = { + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.webp': 'image/webp' + }; + const mimeType = mimeTypes[ext] || 'image/png'; + + const payload = { + question: question, + uploads: [ + { + data: `data:${mimeType};base64,${base64Image}`, + type: 'file', + name: path.basename(imagePath), + mime: mimeType + } + ] + }; + + try { + const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +// Example usage +uploadImage( + 'your-flow-id', + 'Can you describe what you see in this image?', + 'path/to/your/image.png' +).then(result => { + console.log('Analysis result:', result); +}); +``` +{% endtab%} +{% endtabs%} + +### Téléchargements audio (discours au texte) + +Téléchargez des fichiers audio pour le traitement de la parole à texte. Se référer à[Audio](uploads.md#audio)Pour plus de référence. + +** Structure de téléchargement: ** + +```json +{ + "data": "", + "type": "", + "name": ", + "mime": " +} +``` + +** Données: ** base64 ou URL d'un audio + +**Taper**:`url`ou`file` + +** Nom: ** Nom de l'audio + +**Mime**:`audio/mp4`, `audio/webm`, `audio/wav`, `audio/mpeg` + +{% Tabs%} +{% Tab Title = "Python (base64)"%} +```python +import requests +import base64 +import os + +def upload_audio(flow_id, audio_path, question=None): + # Read and encode audio + with open(audio_path, 'rb') as audio_file: + encoded_audio = base64.b64encode(audio_file.read()).decode('utf-8') + + # Determine MIME type based on file extension + mime_types = { + '.webm': 'audio/webm', + '.wav': 'audio/wav', + '.mp3': 'audio/mpeg', + '.m4a': 'audio/mp4' + } + + file_ext = os.path.splitext(audio_path)[1].lower() + mime_type = mime_types.get(file_ext, 'audio/webm') + + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + payload = { + "uploads": [ + { + "data": f"data:{mime_type};base64,{encoded_audio}", + "type": "audio", + "name": os.path.basename(audio_path), + "mime": mime_type + } + ] + } + + # Add question if provided + if question: + payload["question"] = question + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example usage +result = upload_audio( + 
flow_id="your-flow-id", + audio_path="path/to/your/audio.wav", + question="Please transcribe this audio and summarize the content" +) + +print(result) +``` +{% endtab%} + +{% Tab Title = "Python (URL)"%} +```python +import requests +import os + +def upload_audio_url(flow_id, audio_url, question=None, audio_name=None): + """ + Upload an audio file using a URL instead of base64 encoding. + This is more efficient for audio files that are already hosted online. + """ + url = f"http://localhost:3000/api/v1/prediction/{flow_id}" + + # Extract filename from URL if not provided + if not audio_name: + audio_name = audio_url.split('/')[-1] + if '?' in audio_name: + audio_name = audio_name.split('?')[0] + + # Determine MIME type from URL extension + mime_types = { + '.webm': 'audio/webm', + '.wav': 'audio/wav', + '.mp3': 'audio/mpeg', + '.m4a': 'audio/mp4', + '.ogg': 'audio/ogg', + '.aac': 'audio/aac' + } + + file_ext = os.path.splitext(audio_name)[1].lower() + mime_type = mime_types.get(file_ext, 'audio/wav') + + payload = { + "uploads": [ + { + "data": audio_url, + "type": "url", + "name": audio_name, + "mime": mime_type + } + ] + } + + # Add question if provided + if question: + payload["question"] = question + + try: + response = requests.post(url, json=payload) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + print(f"Error: {e}") + return None + +# Example usage with public audio URL +result = upload_audio_url( + flow_id="your-flow-id", + audio_url="https://example.com/path/to/speech.mp3", + question="Please transcribe this audio and provide a summary", + audio_name="speech_recording.mp3" +) + +print(result) + +# Example with direct URL (no custom name or question) +result2 = upload_audio_url( + flow_id="your-flow-id", + audio_url="https://storage.googleapis.com/sample-audio/speech.wav" +) + +print(result2) + +# Example for meeting transcription +result3 = upload_audio_url( + flow_id="your-flow-id", + 
audio_url="https://meetings.example.com/recording-123.m4a", + question="Transcribe this meeting recording and extract key action items and decisions made", + audio_name="team_meeting_jan15.m4a" +) + +print(result3) +``` +{% endtab%} + +{% tab title = "JavaScript (Fichier Téléchargement)"%} +```javascript +async function uploadAudio(flowId, audioFile, question = null) { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + + reader.onload = async function(e) { + const base64Data = e.target.result; + + const payload = { + uploads: [ + { + data: base64Data, + type: 'audio', + name: audioFile.name, + mime: audioFile.type + } + ] + }; + + // Add question if provided + if (question) { + payload.question = question; + } + + try { + const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const result = await response.json(); + resolve(result); + + } catch (error) { + reject(error); + } + }; + + reader.onerror = function() { + reject(new Error('Failed to read file')); + }; + + reader.readAsDataURL(audioFile); + }); +} + +// Example usage with file input +document.getElementById('audioInput').addEventListener('change', async function(e) { + const file = e.target.files[0]; + if (file) { + try { + const result = await uploadAudio( + 'your-flow-id', + file, + 'Please transcribe this audio and summarize the content' + ); + console.log('Transcription result:', result); + } catch (error) { + console.error('Upload failed:', error); + } + } +}); +``` +{% endtab%} + +{% tab title = "javascript (url)"%} +```javascript +async function uploadAudioUrl(flowId, audioUrl, question = null, audioName = null) { + /** + * Upload an audio file using a URL instead of base64 encoding. 
+ * This is more efficient for audio files that are already hosted online. + */ + + // Extract filename from URL if not provided + if (!audioName) { + audioName = audioUrl.split('/').pop(); + if (audioName.includes('?')) { + audioName = audioName.split('?')[0]; + } + } + + // Determine MIME type from URL extension + const mimeTypes = { + '.webm': 'audio/webm', + '.wav': 'audio/wav', + '.mp3': 'audio/mpeg', + '.m4a': 'audio/mp4', + '.ogg': 'audio/ogg', + '.aac': 'audio/aac' + }; + + const fileExt = audioName.toLowerCase().substring(audioName.lastIndexOf('.')); + const mimeType = mimeTypes[fileExt] || 'audio/wav'; + + const payload = { + uploads: [ + { + data: audioUrl, + type: 'url', + name: audioName, + mime: mimeType + } + ] + }; + + // Add question if provided + if (question) { + payload.question = question; + } + + try { + const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Error:', error); + return null; + } +} + +// Example usage with public audio URL +async function transcribeAudioFromUrl() { + try { + const result = await uploadAudioUrl( + 'your-flow-id', + 'https://example.com/path/to/speech.mp3', + 'Please transcribe this audio and provide a summary', + 'speech_recording.mp3' + ); + + console.log('Transcription result:', result); + } catch (error) { + console.error('Upload failed:', error); + } +} + +// Example with direct URL (no custom name or question) +uploadAudioUrl( + 'your-flow-id', + 'https://storage.googleapis.com/sample-audio/speech.wav' +).then(result => { + if (result) { + console.log('Transcription result:', result); + } +}); + +// Example for meeting transcription +uploadAudioUrl( + 'your-flow-id', + 'https://meetings.example.com/recording-123.m4a', + 'Transcribe this meeting recording and extract key action items and decisions made', + 'team_meeting_jan15.m4a' +).then(result => { + if (result) { + console.log('Meeting analysis:', result); + } +}); + +// Example with multiple audio URLs for batch processing +async function transcribeMultipleAudios() { + const audioUrls = [ + { + url: 'https://example.com/interview1.wav', + question: 'Transcribe this interview and summarize key points', + name: 'interview_candidate_1.wav' + }, + { + url: 'https://example.com/interview2.mp3', + question: 'Transcribe this interview and summarize key points', + name: 'interview_candidate_2.mp3' + }, + { + url: 'https://example.com/lecture.m4a', + question: 'Transcribe this lecture and create bullet-point notes', + name: 'cs101_lecture.m4a' + } + ]; + + const results = await Promise.all( + audioUrls.map(audio => + uploadAudioUrl( + 'your-flow-id', + audio.url, + audio.question, + audio.name + ) + ) + ); + + results.forEach((result, index) => { + console.log(`Audio ${index + 1} transcription:`, result); + }); +} +``` +{% endtab%} 
+{% endtabs%} + +### Téléchargements de fichiers + +Téléchargez des fichiers pour faire en sorte que LLM traite les fichiers et répond à la requête liée aux fichiers. Se référer à[Files](uploads.md#files)Pour plus de référence. + +### Entrée humaine + +Pour reprendre l'exécution à partir d'un point de contrôle précédemment arrêté,`humanInput`doit être fourni. Référer[Human In The Loop](../tutorials/human-in-the-loop.md)pour plus de détails. + +** Structure d'entrée humaine ** + +```json +{ + "type": "", + "feedback": "" +} +``` + +* ** Type **: Soit`proceed`ou`reject` +* ** Feedback **: commentaires à la dernière sortie + +{% hint style = "avertissement"%} +Doit spécifier la même chose`sessionId`où l'exécution a été arrêtée +{% EndHint%} + +```json +{ + "humanInput": { + "type": "reject", + "feedback": "Include more emoji" + }, + "overrideConfig": { + "sessionId": "abc" + } +} +``` + +## Dépannage + +1. ** 404 introuvable **: Vérifiez que l'ID de flux est correct et le flux existe +2. ** 401 Accès non autorisé **: Vérifiez si le flux est protégé par la clé API +3. ** 400 Bad Demande **: Format de demande de vérification et champs requis +4. ** 413 charge utile trop grande **: réduire les tailles de fichiers ou diviser de grandes demandes +5. ** 500 Erreur du serveur interne: ** Vérifiez s'il y a une mauvaise configuration des nœuds dans le flux diff --git a/fr/using-flowise/streaming.md b/fr/using-flowise/streaming.md new file mode 100644 index 00000000..d19333b7 --- /dev/null +++ b/fr/using-flowise/streaming.md @@ -0,0 +1,106 @@ +--- +description: Learn how Flowise streaming works +--- + +# Streaming + +Si le streaming est défini lors de la prédiction, les jetons seront envoyés comme des données uniquement[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)à mesure qu'ils deviennent disponibles. 
+ +### Utilisation de la bibliothèque Python / TS + +Flowise fournit 2 bibliothèques: + +* [Python](https://pypi.org/project/flowise/): `pip install flowise` +* [Typescript](https://www.npmjs.com/package/flowise-sdk): `npm install flowise-sdk` + +{% Tabs%} +{% tab title = "python"%} +```python +from flowise import Flowise, PredictionData + +def test_streaming(): + client = Flowise() + + # Test streaming prediction + completion = client.create_prediction( + PredictionData( + chatflowId="", + question="Tell me a joke!", + streaming=True + ) + ) + + # Process and print each streamed chunk + print("Streaming response:") + for chunk in completion: + # {event: "token", data: "hello"} + print(chunk) + + +if __name__ == "__main__": + test_streaming() +``` +{% endtab%} + +{% tab title = "TypeScript"%} +```javascript +import { FlowiseClient } from 'flowise-sdk' + +async function test_streaming() { + const client = new FlowiseClient({ baseUrl: 'http://localhost:3000' }); + + try { + // For streaming prediction + const prediction = await client.createPrediction({ + chatflowId: '', + question: 'What is the capital of France?', + streaming: true, + }); + + for await (const chunk of prediction) { + // {event: "token", data: "hello"} + console.log(chunk); + } + + } catch (error) { + console.error('Error:', error); + } +} + +// Run streaming test +test_streaming() +``` +{% endtab%} + +{% tab title = "curl"%} +```bash +curl https://localhost:3000/api/v1/predictions/{flow-id} \ + -H "Content-Type: application/json" \ + -d '{ + "question": "Hello world!", + "streaming": true + }' +``` +{% endtab%} +{% endtabs%} + +```html +event: token +data: Once upon a time... 
+``` + +Le flux d'événements d'une prédiction se compose des types d'événements suivants: + +| Événement | Description | +| --------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| Commencez | Le début du streaming | +| Token | Émis lorsque la prédiction diffuse une nouvelle sortie de jeton | +| Erreur | Émis lorsque la prédiction renvoie une erreur | +| Fin | Émis lorsque la prédiction se termine | +| métadonnées | Toutes les métadonnées telles que Chatid, MessageID, du flux connexe. Émis après tout, les jetons ont terminé le streaming et avant l'événement de fin | +| SourcedoDuments | Émis lorsque le flux renvoie des sources du magasin vectoriel | +| usageTools | Émis lorsque le flux a utilisé des outils | + +### Rationaliser l'application + +[https://github.com/HenryHengZJ/flowise-streamlit](https://github.com/HenryHengZJ/flowise-streamlit) diff --git a/fr/using-flowise/uploads.md b/fr/using-flowise/uploads.md new file mode 100644 index 00000000..2f656366 --- /dev/null +++ b/fr/using-flowise/uploads.md @@ -0,0 +1,413 @@ +--- +description: Learn how to use upload images, audio, and other files +--- + +# Téléchargements + +Flowise vous permet de télécharger des images, de l'audio et d'autres fichiers à partir du chat. Dans cette section, vous apprendrez à activer et à utiliser ces fonctionnalités. + +## Image + +Certains modèles de chat vous permettent de saisir des images. Reportez-vous toujours à la documentation officielle du LLM pour confirmer si le modèle prend en charge l'entrée d'image. 
+ +* [ChatOpenAI](../integrations/llamaindex/chat-models/chatopenai.md) +* [AzureChatOpenAI](../integrations/llamaindex/chat-models/azurechatopenai.md) +* [ChatAnthropic](../integrations/langchain/chat-models/chatanthropic.md) +* [AWSChatBedrock](../integrations/langchain/chat-models/aws-chatbedrock.md) +* [ChatGoogleGenerativeAI](../integrations/langchain/chat-models/google-ai.md) +* [ChatOllama](../integrations/llamaindex/chat-models/chatollama.md) +* [Google Vertex AI](../integrations/langchain/llms/googlevertex-ai.md) + +{% hint style = "avertissement"%} +Le traitement d'image ne fonctionne qu'avec certaines chaînes / agents dans ChatFlow. + +[LLMChain](../integrations/langchain/chains/llm-chain.md), [Conversation Chain](../integrations/langchain/chains/conversation-chain.md), [ReAct Agent](../integrations/langchain/agents/react-agent-chat.md), [Conversational Agent](../integrations/langchain/agents/conversational-agent.md), [Tool Agent](../integrations/langchain/agents/tool-agent.md) +{% EndHint%} + +Si vous activez ** Autoriser le téléchargement d'images **, vous pouvez télécharger des images à partir de l'interface de chat. + +
    ", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "question": "Can you describe the image?", + "uploads": [ + { + "data": "data:image/png;base64,iVBORw0KGgdM2uN0", //base64 string or url + "type": "file", // file | url + "name": "Flowise.png", + "mime": "image/png" + } + ] +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +## Audio + +Dans la configuration de ChatFlow, vous pouvez sélectionner un module Speech-to-Text. Les intégrations prises en charge comprennent: + +* Openai +* Assemblage +* [LocalAI](../integrations/langchain/chat-models/chatlocalai.md) + +Lorsque cela est activé, les utilisateurs peuvent parler directement dans le microphone. Leur discours est transcrit en texte. + +
    + +Pour télécharger l'audio avec l'API: + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +API_URL = "http://localhost:3000/api/v1/prediction/" + +def query(payload): + response = requests.post(API_URL, json=payload) + return response.json() + +output = query({ + "uploads": [ + { + "data": "data:audio/webm;codecs=opus;base64,GkXf", # base64 string + "type": "audio", + "name": "audio.wav", + "mime": "audio/webm" + } + ] +}) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/prediction/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "uploads": [ + { + "data": "data:audio/webm;codecs=opus;base64,GkXf", // base64 string + "type": "audio", + "name": "audio.wav", + "mime": "audio/webm" + } + ] +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +## Fichiers + +Vous pouvez télécharger des fichiers de deux manières: + +* Retrouver des téléchargements de fichiers de génération augmentée (RAG) +* Téléchargements de fichiers complets + +Lorsque les deux options sont activées, les téléchargements de fichiers complets ont priorité. + +### Téléchargements de fichiers + +Vous pouvez upser des fichiers téléchargés à la volée vers le magasin vectoriel. Pour activer les téléchargements de fichiers, assurez-vous de répondre à ces conditions préalables: + +* Vous devez inclure un magasin vectoriel qui prend en charge les téléchargements de fichiers dans le ChatFlow. 
+ * [Pinecone](../integrations/langchain/vector-stores/pinecone.md) + * [Milvus](../integrations/langchain/vector-stores/milvus.md) + * [Postgres](../integrations/langchain/vector-stores/postgres.md) + * [Qdrant](../integrations/langchain/vector-stores/qdrant.md) + * [Upstash](../integrations/langchain/vector-stores/upstash-vector.md) +* Si vous avez plusieurs magasins vectoriels dans un ChatFlow, vous ne pouvez activer que le téléchargement de fichiers pour un magasin vectoriel à la fois. +* Vous devez connecter au moins un nœud de chargeur de documents à l'entrée de document du magasin vectoriel. +* Chargeurs de documents pris en charge: + * [CSV File](../integrations/langchain/document-loaders/csv-file.md) + * [Docx File](../integrations/langchain/document-loaders/docx-file.md) + * [Json File](../integrations/langchain/document-loaders/json-file.md) + * [Json Lines File](broken-reference) + * [PDF File](../integrations/langchain/document-loaders/pdf-file.md) + * [Text File](../integrations/langchain/document-loaders/text-file.md) + * [Unstructured File](../integrations/langchain/document-loaders/unstructured-file-loader.md) + +
    + +Vous pouvez télécharger un ou plusieurs fichiers dans le chat: + +
    src="../.gitbook/assets/Captures 2024-08-26 170456.png" alt="">
    + +Voici comment cela fonctionne: + +1. Les métadonnées des fichiers téléchargées sont mises à jour avec le Chatid. +2. Cela associe le fichier au Chatid. +3. Lors de l'interrogation, un filtre ** ou ** s'applique: + +* Les métadonnées contient`flowise_chatId`et la valeur est l'ID actuel de la session de chat +* Les métadonnées ne contiennent pas`flowise_chatId` + +Un exemple de vecteur incorporé sur le pignon: + +
    + +Pour ce faire avec l'API, suivez ces deux étapes: + +1. Utiliser le[Vector Upsert API](broken-reference)avec`formData`et`chatId`: + +{% Tabs%} +{% tab title = "python"%} +```python +import requests + +API_URL = "http://localhost:3000/api/v1/vector/upsert/" + +# Use form data to upload files +form_data = { + "files": ("state_of_the_union.txt", open("state_of_the_union.txt", "rb")) +} + +body_data = { + "chatId": "some-session-id" +} + +def query(form_data): + response = requests.post(API_URL, files=form_data, data=body_data) + print(response) + return response.json() + +output = query(form_data) +print(output) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +// Use FormData to upload files +let formData = new FormData(); +formData.append("files", input.files[0]); +formData.append("chatId", "some-session-id"); + +async function query(formData) { + const response = await fetch( + "http://localhost:3000/api/v1/vector/upsert/", + { + method: "POST", + body: formData + } + ); + const result = await response.json(); + return result; +} + +query(formData).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +2. 
Utiliser le[Prediction API](broken-reference)avec`uploads`et le`chatId`De l'étape 1: + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +API_URL = "http://localhost:3000/api/v1/prediction/" + +def query(payload): + response = requests.post(API_URL, json=payload) + return response.json() + +output = query({ + "question": "What is the speech about?", + "chatId": "same-session-id-from-step-1", + "uploads": [ + { + "data": "data:text/plain;base64,TWFkYWwcy4=", + "type": "file:rag", + "name": "state_of_the_union.txt", + "mime": "text/plain" + } + ] +}) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/prediction/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "question": "What is the speech about?", + "chatId": "same-session-id-from-step-1", + "uploads": [ + { + "data": "data:text/plain;base64,TWFkYWwcy4=", + "type": "file:rag", + "name": "state_of_the_union.txt", + "mime": "text/plain" + } + ] +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +### Téléchargements de fichiers complets + +Avec les téléchargements de fichiers de chiffon, vous ne pouvez pas travailler avec des données structurées comme des feuilles de calcul ou des tables, et vous ne pouvez pas effectuer une résumé complet en raison du manque de contexte complet. Dans certains cas, vous voudrez peut-être inclure tout le contenu de fichier directement dans l'invite pour un LLM, en particulier avec des modèles comme Gemini et Claude qui ont des fenêtres de contexte plus longues.[This research paper](https://arxiv.org/html/2407.16833v1)est l'un des nombreux qui comparent le chiffon avec des fenêtres de contexte plus long. 
+ +Pour activer les téléchargements de fichiers complets, accédez à ** CHATFLOW Configuration **, ouvrez l'onglet ** Fichier Upload **, puis cliquez sur le commutateur: + +
    + +Vous pouvez voir le bouton ** File Pièce ** dans le chat, où vous pouvez télécharger un ou plusieurs fichiers. Sous le capot, le[File Loader](../integrations/langchain/document-loaders/file-loader.md)traite chaque fichier et le convertit en texte. + +
    + +Notez que si votre ChatFlow utilise un nœud de modèle d'invite de chat, une entrée doit être créée à partir des valeurs d'invite ** de format ** pour passer les données du fichier. Le nom d'entrée spécifié (par exemple {fichier}) doit être inclus dans le champ ** Message humain **. + +
    + +Pour télécharger des fichiers avec l'API: + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +API_URL = "http://localhost:3000/api/v1/prediction/" + +def query(payload): + response = requests.post(API_URL, json=payload) + return response.json() + +output = query({ + "question": "What is the data about?", + "chatId": "some-session-id", + "uploads": [ + { + "data": "data:text/plain;base64,TWFkYWwcy4=", + "type": "file:full", + "name": "state_of_the_union.txt", + "mime": "text/plain" + } + ] +}) +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +async function query(data) { + const response = await fetch( + "http://localhost:3000/api/v1/prediction/", + { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(data) + } + ); + const result = await response.json(); + return result; +} + +query({ + "question": "What is the data about?", + "chatId": "some-session-id", + "uploads": [ + { + "data": "data:text/plain;base64,TWFkYWwcy4=", + "type": "file:full", + "name": "state_of_the_union.txt", + "mime": "text/plain" + } + ] +}).then((response) => { + console.log(response); +}); +``` +{% endtab%} +{% endtabs%} + +Comme vous pouvez le voir dans les exemples, les téléchargements nécessitent une chaîne Base64. Pour obtenir une chaîne Base64 pour un fichier, utilisez le[Create Attachments API](../api-reference/attachments.md). + +### Différence entre les téléchargements complets et de chiffon + +Les téléchargements de fichiers Full et RAG (RETRIEVAL-AUGMED) servent des objectifs différents. + +* ** Téléchargement complet du fichier **: Cette méthode analyse l'ensemble du fichier dans une chaîne et l'envoie au LLM (modèle de grande langue). Il est bénéfique pour résumer le document ou extraire des informations clés. Cependant, avec des fichiers très volumineux, le modèle peut produire des résultats inexacts ou des «hallucinations» en raison de limitations de jetons. 
+* ** Rag Fichier Upload **: Recommandé si vous visez à réduire les coûts de jetons en n'envoyant pas l'intégralité du texte au LLM. Cette approche convient aux tâches Q \ & A sur les documents, mais n'est pas idéale pour le résumé car il n'a pas le contexte complet du document. Cette approche peut prendre plus de temps en raison du processus Upsert. diff --git a/fr/using-flowise/upsertion.md b/fr/using-flowise/upsertion.md new file mode 100644 index 00000000..2d7bdc1c --- /dev/null +++ b/fr/using-flowise/upsertion.md @@ -0,0 +1,677 @@ +# Augmentation + +Upsert fait référence au processus de téléchargement et de traitement des documents dans les magasins vectoriels, formant les bases des systèmes de génération augmentée (RAG) de récupération. + +Il existe deux façons fondamentales de renverser les données dans Vector Store: + +* [Document Stores (Recommended)](document-stores.md) +* ChatFlow Upsert + +Nous vous recommandons fortement d'utiliser des magasins de documents car il fournit une interface unifiée pour aider avec les pipelines de chiffon - récupérer des données à partir de différentes sources, la stratégie de section, la mise en œuvre de la base de données vectorielle, la synchronisation avec les données mises à jour. + +Dans ce guide, nous allons couvrir une autre méthode - ChatFlow Upsert. Il s'agit d'une méthode plus ancienne avant les magasins de documents. + +Pour plus de détails, voir le[Vector Upsert Endpoint API Reference](../api-reference/vector-upsert.md). + +## Comprendre le processus de mise en service + +ChatFlow vous permet de créer un flux qui peut effectuer à la fois le processus de requête à la hausse et de requête RAG, les deux peuvent être exécutés de manière identique. + +

    Upsert vs RAG

    + +## Installation + +Pour qu'un processus Upsert fonctionne, nous aurions besoin de créer un ** Flow userting ** avec 5 nœuds différents: + +1. Chargeur de documents +2. Séparateur de texte +3. Modèle d'incorporation +4. Magasin vectoriel +5. Record Manager (facultatif) + +Tous les éléments ont été couverts par[Document Stores](document-stores.md), reportez-vous là pour plus de détails. + +Une fois que le flux est correctement configuré, il y aura un bouton vert en haut à droite qui permet à l'utilisateur de démarrer le processus UPSERT. + +
    + +
    + +### Chargeurs de documents avec téléchargement de fichiers + +#### Types de documents pris en charge + +| Chargeur de documents | Types de fichiers | +| ----------------- | ---------- | +| Fichier CSV |`.csv` | +| Docx / Word Fichier |`.docx` | +| Fichier JSON |`.json` | +| Fichier de lignes JSON |`.jsonl` | +| Fichier PDF |`.pdf` | +| Fichier texte |`.txt` | +| Fichier Excel |`.xlsx` | +| Fichier PowerPoint |`.pptx` | +| Chargeur de fichiers | Multiple | +| Fichier non structuré | Multiple | + +{% hint style = "info"%} +** IMPORTANT **: Assurez-vous que le type de fichier correspond à votre configuration de chargeur de document. Pour une flexibilité maximale, envisagez d'utiliser le chargeur de fichiers qui prend en charge plusieurs types de fichiers. +{% EndHint%} + +#### Demander le format (données de formulaire) + +Lors du téléchargement de fichiers, utilisez`multipart/form-data`Au lieu de JSON: + +#### Exemples + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +import os + +def upsert_document(chatflow_id, file_path, config=None): + """ + Upsert a single document to a vector store. 
+ + Args: + chatflow_id (str): The chatflow ID configured for vector upserting + file_path (str): Path to the file to upload + return_source_docs (bool): Whether to return source documents in response + config (dict): Optional configuration overrides + + Returns: + dict: API response containing upsert results + """ + url = f"http://localhost:3000/api/v1/vector/upsert/{chatflow_id}" + + # Prepare file data + files = { + 'files': (os.path.basename(file_path), open(file_path, 'rb')) + } + + # Prepare form data + data = {} + + # Add configuration overrides if provided + if config: + data['overrideConfig'] = str(config).replace("'", '"') # Convert to JSON string + + try: + response = requests.post(url, files=files, data=data) + response.raise_for_status() + + return response.json() + + except requests.exceptions.RequestException as e: + print(f"Upload failed: {e}") + return None + finally: + # Always close the file + files['files'][1].close() + +# Example usage +result = upsert_document( + chatflow_id="your-chatflow-id", + file_path="documents/knowledge_base.pdf", + config={ + "chunkSize": 1000, + "chunkOverlap": 200 + } +) + +if result: + print(f"Successfully upserted {result.get('numAdded', 0)} chunks") + if result.get('sourceDocuments'): + print(f"Source documents: {len(result['sourceDocuments'])}") +else: + print("Upload failed") +``` +{% endtab%} + +{% Tab Title = "JavaScript (Browser)"%} +```javascript +class VectorUploader { + constructor(baseUrl = 'http://localhost:3000') { + this.baseUrl = baseUrl; + } + + async upsertDocument(chatflowId, file, config = {}) { + /** + * Upload a file to vector store from browser + * @param {string} chatflowId - The chatflow ID + * @param {File} file - File object from input element + * @param {Object} config - Optional configuration + */ + + const formData = new FormData(); + formData.append('files', file); + + if (config.overrideConfig) { + formData.append('overrideConfig', JSON.stringify(config.overrideConfig)); + } + + try { 
+ const response = await fetch(`${this.baseUrl}/api/v1/vector/upsert/${chatflowId}`, { + method: 'POST', + body: formData + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + const result = await response.json(); + return result; + + } catch (error) { + console.error('Upload failed:', error); + throw error; + } + } + + +} + +// Example usage in browser +const uploader = new VectorUploader(); + +// Single file upload +document.getElementById('fileInput').addEventListener('change', async function(e) { + const file = e.target.files[0]; + if (file) { + try { + const result = await uploader.upsertDocument( + 'your-chatflow-id', + file, + { + overrideConfig: { + chunkSize: 1000, + chunkOverlap: 200 + } + } + ); + + console.log('Upload successful:', result); + alert(`Successfully processed ${result.numAdded || 0} chunks`); + + } catch (error) { + console.error('Upload failed:', error); + alert('Upload failed: ' + error.message); + } + } +}); +``` +{% endtab%} + +{% tab title = "javascript (node.js)"%} +```javascript +const fs = require('fs'); +const path = require('path'); +const FormData = require('form-data'); +const fetch = require('node-fetch'); + +class NodeVectorUploader { + constructor(baseUrl = 'http://localhost:3000') { + this.baseUrl = baseUrl; + } + + async upsertDocument(chatflowId, filePath, config = {}) { + /** + * Upload a file to vector store from Node.js + * @param {string} chatflowId - The chatflow ID + * @param {string} filePath - Path to the file + * @param {Object} config - Optional configuration + */ + + if (!fs.existsSync(filePath)) { + throw new Error(`File not found: ${filePath}`); + } + + const formData = new FormData(); + const fileStream = fs.createReadStream(filePath); + + formData.append('files', fileStream, { + filename: path.basename(filePath), + contentType: this.getMimeType(filePath) + }); + + if (config.overrideConfig) { + formData.append('overrideConfig', 
JSON.stringify(config.overrideConfig)); + } + + try { + const response = await fetch(`${this.baseUrl}/api/v1/vector/upsert/${chatflowId}`, { + method: 'POST', + body: formData, + headers: formData.getHeaders() + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`HTTP ${response.status}: ${errorText}`); + } + + return await response.json(); + + } catch (error) { + console.error('Upload failed:', error); + throw error; + } + } + + getMimeType(filePath) { + const ext = path.extname(filePath).toLowerCase(); + const mimeTypes = { + '.pdf': 'application/pdf', + '.txt': 'text/plain', + '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + '.csv': 'text/csv', + '.json': 'application/json' + }; + return mimeTypes[ext] || 'application/octet-stream'; + } +} + +// Example usage +async function main() { + const uploader = new NodeVectorUploader(); + + try { + // Single file upload + const result = await uploader.upsertDocument( + 'your-chatflow-id', + './documents/manual.pdf', + { + overrideConfig: { + chunkSize: 1200, + chunkOverlap: 100 + } + } + ); + + console.log('Single file upload result:', result); + } catch (error) { + console.error('Process failed:', error); + } +} + +// Run if this file is executed directly +if (require.main === module) { + main(); +} + +module.exports = { NodeVectorUploader }; +``` +{% endtab%} + +{% tab title = "curl"%} +```bash +# Basic file upload with cURL +curl -X POST "http://localhost:3000/api/v1/vector/upsert/your-chatflow-id" \ + -F "files=@documents/knowledge_base.pdf" + +# File upload with configuration override +curl -X POST "http://localhost:3000/api/v1/vector/upsert/your-chatflow-id" \ + -F "files=@documents/manual.pdf" \ + -F 'overrideConfig={"chunkSize": 1000, "chunkOverlap": 200}' + +# Upload with custom headers for authentication (if configured) +curl -X POST "http://localhost:3000/api/v1/vector/upsert/your-chatflow-id" \ + -H "Authorization: Bearer 
your-api-token" \ + -F "files=@documents/faq.txt" \ + -F 'overrideConfig={"chunkSize": 800, "chunkOverlap": 150}' +``` +{% endtab%} +{% endtabs%} + +### Chargeurs de documents sans téléchargement de fichiers + +Pour les chargeurs de documents qui ne nécessitent pas de téléchargements de fichiers (par exemple, les grabyers Web, les connecteurs de base de données, les intégrations API), utilisez le format JSON similaire à l'API de prédiction. + +#### Exemples + +{% Tabs%} +{% tab title = "python"%} +```python +import requests +from typing import Dict, Any, Optional + +def upsert(chatflow_id: str, config: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]: + """ + Trigger vector upserting for chatflows that don't require file uploads. + + Args: + chatflow_id: The chatflow ID configured for vector upserting + config: Optional configuration overrides + + Returns: + API response containing upsert results + """ + url = f"http://localhost:3000/api/v1/vector/upsert/{chatflow_id}" + + payload = { + "overrideConfig": config + } + + headers = { + "Content-Type": "application/json" + } + + try: + response = requests.post(url, json=payload, headers=headers, timeout=300) + response.raise_for_status() + + return response.json() + + except requests.exceptions.RequestException as e: + print(f"Upsert failed: {e}") + return None + +result = upsert( + chatflow_id="chatflow-id", + config={ + "chunkSize": 800, + "chunkOverlap": 100, + } +) + +if result: + print(f"Upsert completed: {result.get('numAdded', 0)} chunks added") +``` +{% endtab%} + +{% tab title = "javascript"%} +```javascript +class NoFileUploader { + constructor(baseUrl = 'http://localhost:3000') { + this.baseUrl = baseUrl; + } + + async upsertWithoutFiles(chatflowId, config = {}) { + /** + * Trigger vector upserting for flows that don't need file uploads + * @param {string} chatflowId - The chatflow ID + * @param {Object} config - Configuration overrides + */ + + const payload = { + overrideConfig: config + }; + + 
try { + const response = await fetch(`${this.baseUrl}/api/v1/vector/upsert/${chatflowId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(payload) + }); + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + + return await response.json(); + + } catch (error) { + console.error('Upsert failed:', error); + throw error; + } + } + + async scheduledUpsert(chatflowId, interval = 3600000) { + /** + * Set up scheduled upserting for dynamic content sources + * @param {string} chatflowId - The chatflow ID + * @param {number} interval - Interval in milliseconds (default: 1 hour) + */ + + console.log(`Starting scheduled upsert every ${interval/1000} seconds`); + + const performUpsert = async () => { + try { + console.log('Performing scheduled upsert...'); + + const result = await this.upsertWithoutFiles(chatflowId, { + addMetadata: { + scheduledUpdate: true, + timestamp: new Date().toISOString() + } + }); + + console.log(`Scheduled upsert completed: ${result.numAdded || 0} chunks processed`); + + } catch (error) { + console.error('Scheduled upsert failed:', error); + } + }; + + // Perform initial upsert + await performUpsert(); + + // Set up recurring upserts + return setInterval(performUpsert, interval); + } +} + +// Example usage +const uploader = new NoFileUploader(); + +async function performUpsert() { + try { + const result = await uploader.upsertWithoutFiles( + 'chatflow-id', + { + chunkSize: 800, + chunkOverlap: 100 + } + ); + + console.log('Upsert result:', result); + + } catch (error) { + console.error('Upsert failed:', error); + } +} + +// One time upsert +await performUpsert(); + +// Set up scheduled updates (every 30 minutes) +const schedulerHandle = await uploader.scheduledUpsert( + 'dynamic-content-chatflow-id', + 30 * 60 * 1000 +); + +// To stop scheduled updates later: +// clearInterval(schedulerHandle); +``` +{% endtab%} + +{% tab title = "curl"%} +```bash +# Basic upsert 
with cURL +curl -X POST "http://localhost:3000/api/v1/vector/upsert/your-chatflow-id" \ + -H "Content-Type: application/json" + +# Upsert with configuration override +curl -X POST "http://localhost:3000/api/v1/vector/upsert/your-chatflow-id" \ + -H "Content-Type: application/json" \ + -d '{ + "overrideConfig": { + "returnSourceDocuments": true + } + }' + +# Upsert with custom headers for authentication (if configured) +curl -X POST "http://localhost:3000/api/v1/vector/upsert/your-chatflow-id" \ + -H "Authorization: Bearer your-api-token" \ + -H "Content-Type: application/json" +``` +{% endtab%} +{% endtabs%} + +## Champs de réponse + +| Champ | Type | Description | +| ------------ | ------ | ----------------------------------------------------------- | +| `numAdded`| Numéro | Nombre de nouveaux morceaux ajoutés au magasin vectoriel | +| `numDeleted`| Numéro | Nombre de morceaux supprimés (si vous utilisez Record Manager) | +| `numSkipped`| Numéro | Nombre de morceaux sautés (si vous utilisez un gestionnaire d'enregistrements) | +| `numUpdated`| Numéro | Nombre de morceaux existants mis à jour (si vous utilisez un gestionnaire d'enregistrements) | + +## Stratégies d'optimisation + +### 1. Stratégies de traitement par lots + +```python +def intelligent_batch_processing(files: List[str], chatflow_id: str) -> Dict[str, Any]: + """Process files in optimized batches based on size and type.""" + + # Group files by size and type + small_files = [] + large_files = [] + + for file_path in files: + file_size = os.path.getsize(file_path) + if file_size > 5_000_000: # 5MB + large_files.append(file_path) + else: + small_files.append(file_path) + + results = {'successful': [], 'failed': [], 'totalChunks': 0} + + # Process large files individually + for file_path in large_files: + print(f"Processing large file: {file_path}") + # Individual processing with custom config + # ... 
implementation + + # Process small files in batches + batch_size = 5 + for i in range(0, len(small_files), batch_size): + batch = small_files[i:i + batch_size] + print(f"Processing batch of {len(batch)} small files") + # Batch processing + # ... implementation + + return results +``` + +### 2. Optimisation des métadonnées + +```python +import requests +import os +from datetime import datetime +from typing import Dict, Any + +def upsert_with_optimized_metadata(chatflow_id: str, file_path: str, + department: str = None, category: str = None) -> Dict[str, Any]: + """ + Upsert document with automatically optimized metadata. + """ + url = f"http://localhost:3000/api/v1/vector/upsert/{chatflow_id}" + + # Generate optimized metadata + custom_metadata = { + 'department': department or 'general', + 'category': category or 'documentation', + 'indexed_date': datetime.now().strftime('%Y-%m-%d'), + 'version': '1.0' + } + + optimized_metadata = optimize_metadata(file_path, custom_metadata) + + # Prepare request + files = {'files': (os.path.basename(file_path), open(file_path, 'rb'))} + data = { + 'overrideConfig': str({ + 'metadata': optimized_metadata + }).replace("'", '"') + } + + try: + response = requests.post(url, files=files, data=data) + response.raise_for_status() + return response.json() + finally: + files['files'][1].close() + +# Example usage with different document types +results = [] + +# Technical documentation +tech_result = upsert_with_optimized_metadata( + chatflow_id="tech-docs-chatflow", + file_path="docs/api_reference.pdf", + department="engineering", + category="technical_docs" +) +results.append(tech_result) + +# HR policies +hr_result = upsert_with_optimized_metadata( + chatflow_id="hr-docs-chatflow", + file_path="policies/employee_handbook.pdf", + department="human_resources", + category="policies" +) +results.append(hr_result) + +# Marketing materials +marketing_result = upsert_with_optimized_metadata( + chatflow_id="marketing-chatflow", + 
file_path="marketing/product_brochure.pdf", + department="marketing", + category="promotional" +) +results.append(marketing_result) + +for i, result in enumerate(results): + print(f"Upload {i+1}: {result.get('numAdded', 0)} chunks added") +``` + +## Dépannage + +1. ** le téléchargement de fichiers échoue ** + * Vérifier la compatibilité du format de fichier + * Vérifiez les limites de taille du fichier +2. ** Traitement de temps mort ** + * Augmenter le délai de demande + * Cassez les fichiers volumineux en pièces plus petites + * Optimiser la taille du morceau +3. ** Erreurs du magasin vectoriel ** + * Vérifiez la connectivité du magasin vectoriel + * Vérifiez la compatibilité des dimensions du modèle d'intégration diff --git a/fr/using-flowise/variables.md b/fr/using-flowise/variables.md new file mode 100644 index 00000000..df2f31fa --- /dev/null +++ b/fr/using-flowise/variables.md @@ -0,0 +1,74 @@ +--- +description: Learn how to use variables in Flowise +--- + +# Variables + +*** + +Flowise permet aux utilisateurs de créer des variables qui peuvent être utilisées dans les nœuds. Les variables peuvent être statiques ou d'exécution. + +### Statique + +La variable statique sera enregistrée avec la valeur spécifiée et récupérée telle qu'elle est. + +
    + +### Remplacer ou régler la variable via l'API + +Afin de remplacer la valeur de la variable, l'utilisateur doit l'activer explicitement à partir du bouton supérieur droit: + +** Paramètres ** -> ** Configuration ** -> ** Sécurité ** Tab: + +
    + +S'il existe une variable existante créée, la valeur de variable fournie dans l'API remplacera la valeur existante. + +```json +{ + "question": "hello", + "overrideConfig": { + "vars": { + "var": "some-override-value" + } + } +} +``` + +### En utilisant des variables + +Les variables peuvent être utilisées par les nœuds en flux. Par exemple, une variable nommée **`character`** est créé: + +
    + +Nous pouvons alors utiliser cette variable comme **`$vars.`** Dans la fonction des nœuds suivants: + +* [Custom Tool](../integrations/langchain/tools/custom-tool.md) +* [Custom Function](../integrations/utilities/custom-js-function.md) +* [Custom Loader](../integrations/langchain/document-loaders/custom-document-loader.md) +* [If Else](../integrations/utilities/if-else.md) +* MCP personnalisé + +
    + +En outre, l'utilisateur peut également utiliser la variable dans l'entrée de texte de n'importe quel nœud avec le format suivant: + +**`{{$vars.}}`** + +Par exemple, dans le message du système d'agent: + +
    + +## Ressources + +* [Pass Variables to Function](../integrations/langchain/tools/custom-tool.md#pass-variables-to-function) diff --git a/fr/using-flowise/workspaces.md b/fr/using-flowise/workspaces.md new file mode 100644 index 00000000..24fdc80e --- /dev/null +++ b/fr/using-flowise/workspaces.md @@ -0,0 +1,167 @@ +# Espaces de travail + +{% hint style = "info"%} +Les évaluations ne sont disponibles que pour le cloud et le plan d'entreprise +{% EndHint%} + +Lors de votre connexion initiale, un espace de travail par défaut sera généré automatiquement pour vous. Les espaces de travail servent à partitionner les ressources entre diverses équipes ou unités commerciales. À l'intérieur de chaque espace de travail, le contrôle d'accès basé sur les rôles (RBAC) est utilisé pour gérer les autorisations et l'accès, garantissant que les utilisateurs n'ont accès qu'aux ressources et paramètres requis pour leur rôle. + +
    + +## Configuration du compte d'administration + + + + Pour l'entreprise auto-hébergée, les variables Env suivantes doivent être définies + +``` +JWT_AUTH_TOKEN_SECRET +JWT_REFRESH_TOKEN_SECRET +JWT_ISSUER +JWT_AUDIENCE +JWT_TOKEN_EXPIRY_IN_MINUTES +JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES +PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS +PASSWORD_SALT_HASH_ROUNDS +TOKEN_HASH_SECRET +``` + + + +Par défaut, la nouvelle installation de Flowise nécessitera une configuration d'administration, similaire à la façon dont vous devez configurer un utilisateur racine pour votre base de données initialement. + +
    + +Vous vous verrez ajouté en tant qu'administrateur de l'organisation dans l'espace de travail que vous avez créé. + +
    + +Pour inviter de nouveaux utilisateurs dans l'espace de travail, vous devez d'abord créer un rôle. + +## Créer un rôle + +Accédez à des rôles dans la barre gauche et cliquez sur Ajouter un rôle: + +
    + +## Inviter l'utilisateur + + + + Pour l'entreprise auto-hébergée, les variables Env suivantes doivent être définies + +``` +INVITE_TOKEN_EXPIRY_IN_HOURS +SMTP_HOST +SMTP_PORT +SMTP_USER +SMTP_PASSWORD +``` + + + +Accédez aux utilisateurs de la barre gauche, vous vous verrez comme l'administrateur du compte. Ceci est indiqué par l'icône de la personne avec une étoile: + +
    + +Cliquez sur Inviter. L'e-mail invité recevra une invitation: + +
    + +En cliquant sur le lien d'invitation, l'utilisateur invité sera amené à une page d'inscription. + +
    + +Après avoir été inscrit et connecté en tant qu'utilisateur invité, vous serez dans l'espace de travail attribué, et il n'y aura pas de section de gestion de l'utilisateur et de l'espace de travail: + +
    + +Si vous êtes invité dans plusieurs espaces de travail, vous pouvez passer à différents espaces de travail à partir du bouton déroulant supérieur droit. Ici, nous sommes affectés à Workspace 2 avec ** Voir uniquement ** Permission. Vous pouvez remarquer que le bouton Ajouter un nouveau pour ChatFlow n'est plus visible. Cela garantit que l'utilisateur ne peut que visualiser, et non créer, mettre à jour ni supprimer. Les mêmes règles RBAC s'appliquent également à l'API. + +
    + +Maintenant, de retour à l'administrateur de compte, vous pourrez voir les utilisateurs invités, leur statut, leurs rôles et l'espace de travail actif: + +
    + +L'administrateur du compte peut également modifier les paramètres des autres utilisateurs: + +
    + +## Activité de connexion + +L'administrateur pourra voir chaque connexion et déconnexion de tous les utilisateurs: + +
    + +## Création d'un élément dans l'espace de travail + +Tous les éléments créés dans un espace de travail sont isolés d'un autre espace de travail. Les espaces de travail sont un moyen de regrouper logiquement les utilisateurs et les ressources au sein d'une organisation, garantissant des limites de confiance distinctes pour la gestion des ressources et le contrôle d'accès. Il est recommandé de créer des espaces de travail distincts pour chaque équipe. + +Ici, nous créons un chatflow nommé ** Chatflow1 ** dans ** workspace1 **: + +
    + +Lorsque nous passons à ** workspace2 **, ** Chatflow1 ** ne sera pas visible. Cela s'applique à toutes les ressources telles que les flux d'agent, les outils, les assistants, etc. + +
    + +Le diagramme ci-dessous illustre la relation entre les organisations, les espaces de travail et les différentes ressources associées et contenues dans un espace de travail. + +
    + +## Partage d'identification + +Vous pouvez partager des informations d'identification à d'autres espaces de travail. Cela permet aux utilisateurs de réutiliser le même ensemble d'identification dans différents espaces de travail. + +Après avoir créé un diplôme, l'administrateur de compte ou l'utilisateur avec l'autorisation de partage des informations d'identification du RBAC pourra cliquer sur Partager: + +
    + +L'utilisateur peut sélectionner les espaces de travail pour partager les informations d'identification avec: + +
    + +Maintenant, passez à l'espace de travail où les informations d'identification ont été partagées, vous verrez les informations d'identification partagées. L'utilisateur n'est pas en mesure de modifier les informations d'identification partagées. + +
    + +## Suppression d'un espace de travail + +Actuellement, seul l'administrateur du compte peut supprimer les espaces de travail. Par défaut, vous n'êtes pas en mesure de supprimer un espace de travail s'il y a encore des utilisateurs dans cet espace de travail. + +
    + +Vous devrez d'abord dissoudre tous les utilisateurs invités. Cela permettait de la flexibilité au cas où vous souhaitez simplement supprimer certains utilisateurs d'un espace de travail. Notez que le propriétaire de l'organisation qui a créé l'espace de travail n'est pas en mesure d'être non lié à un espace de travail. + +
    + +Après avoir désabillé les utilisateurs invités, et le seul utilisateur laissé dans l'espace de travail est le propriétaire de l'organisation, le bouton de suppression est maintenant cliquable: + +
    + +La suppression d'un espace de travail est une action irréversible et sera en cascade de supprimer tous les éléments de cet espace de travail. Vous verrez une boîte d'avertissement: + +
    + +Après avoir supprimé un espace de travail, l'utilisateur se repliera à l'espace de travail par défaut. L'espace de travail par défaut qui a été automatiquement créé au début n'est pas en mesure d'être supprimé.
    variable opération Nom
    $flow.state.messages[-1].content est oui Sortie 1
    variable opération Nom
    $flow.state.orderStatus est confirmed Sortie 1
    Description Options / Syntax
    variable l'élément variable ou de données à évaluer dans la condition. Cela peut inclure des données de la sortie de l'agent. - $flow.output.content (Agent Output - string)
    - $flow.output.<key> (sortie de la clé JSON de l'agent - String / Number)
    - $flow.state.messages.length (Total Messages)
    - $flow.state.messages[0].content (First Message Content)
    - $flow.state.messages[-1].content (Last Message Content)
    - $vars.<variable-name> (variable globale)
    Opération - La comparaison ou l'opération logique à effectuer sur la variable. - Contient
    - ne contient pas
    - commence par
    - se termine par
    - est
    - n'est pas
    - est vide
    - n'est pas vide
    - supérieur à
    - inférieur à
    - égal à
    - pas égal à
    - supérieur ou égal à
    - inférieur ou égal à
    Valeur - La valeur à comparer avec la variable. Exemples: "yes", 10, "Hello"
    Nom de sortie - Le nom du chemin de sortie à suivre si la condition est évaluée à vrai. - Nom défini par l'utilisateur (ex: "Agent1", "End", "Loop")
    requis Description
    Démarrer le nœud oui reçoit l'état du nœud de départ. Cela permet au nœud de l'agent de condition évaluer les conditions en fonction du contexte initial de la conversation, y compris tout état personnalisé.
    Node d'agent oui reçoit la sortie du nœud de l'agent. Cela permet au nœud de l'agent de condition de prendre des décisions en fonction des actions de l'agent et de l'historique de conversation, y compris tout état personnalisé.
    LLM Node Oui Reçoit la sortie du nœud LLM. Cela permet au nœud d'agent de condition évaluer les conditions en fonction de la réponse de la LLM et de l'historique de conversation, y compris tout état personnalisé.
    Le nœud d'outil oui reçoit la sortie du nœud du nœud de l'outil. Cela permet au nœud de l'agent de condition de prendre des décisions en fonction des résultats de l'exécution de l'outil et de l'historique de conversation, y compris tout état personnalisé.
    Paramètre requis Description
    Name NO Ajouter un nom descriptif à la condition NODE NODNE TO IMPORT facilement.
    Condition Oui C'est là que nous définissons la logique qui sera évaluée pour déterminer les chemins de sortie .
    requis Description
    Système invite pas Définit la personne d'agent de condition. Exemple: "Vous êtes un agent de service client spécialisé dans le support technique. State.Messages Array en tant que message humain. Il nous permet à injecter un message de type humain dans le flux de conversation après que le nœud de l'agent de condition ait traité ses entrées et avant que le nœud suivant ne reçoive la sortie structurée de l'agent non pour instruire le nœud d'agent de condition . Type, valeurs d'énumération, description).
    requis Description
    nœud d'agent oui reçoit la sortie d'un agent précède Node. Ces données sont ensuite renvoyées au nœud cible spécifié dans le paramètre "LOOP TO".
    LLM Node Oui reçoit la sortie d'un nœud LLM précédent. Ces données sont ensuite renvoyées au nœud cible spécifié dans le paramètre "Loop to".
    Node d'outil Oui reçoit la sortie d'un nœud d'outil de précédent. Ces données sont ensuite renvoyées au nœud cible spécifié dans le paramètre "boucle à"
    Node de condition Oui reçoit la sortie d'un nœud de condition de condition de précédent. Ces données sont ensuite renvoyées au nœud cible spécifié dans le paramètre "Loop to".
    Le nœud de l'agent de condition oui reçoit la sortie d'un nœud d'agent de condition précédant. Ces données sont ensuite renvoyées au nœud cible spécifié dans le paramètre "Loop to".
    requis Description
    Boucler vers ("Loop To") Oui Spécifie le nœud cible vers lequel le flux conversationnel doit être redirigé. Ce nœud cible doit être un nœud d'agent ou un nœud LLM.
    requis Description
    Nœud d'agent Oui Reçoit la sortie finale d'un nœud d'agent précédent, indiquant la fin du traitement du nœud d'agent.
    LLM Node Oui reçoit la sortie finale d'un nœud LLM précédent, indiquant l'extrémité du traitement du nœud LLM.
    Tool Le nœud Oui reçoit la sortie finale d'un nœud d'outil précédent, indiquant l'achèvement de l'exécution du nœud d'outil.
    Nœud de condition Oui Reçoit la sortie finale d'un nœud de condition précédent, indiquant l'achèvement du traitement du nœud de condition.
    Node d'agent de condition Oui reçoit la sortie finale d'un nœud de condition précédente, indiquant l'achèvement du traitement du nœud de l'agent de condition. + +{% hint style = "info"%} +Le nœud de fin ** nécessite au moins une connexion à partir des nœuds suivants **: nœud d'agent, nœud LLM ou nœud d'outil. +{% EndHint%} + +### Sorties + +Le nœud de fin ** n'a pas de connexions de sortie ** car elle signifie la terminaison du flux d'informations. + +### Meilleures pratiques + +{% Tabs%} +{% Tab Title = "Pro Tips"%} +** Fournir une réponse finale ** + +Le cas échéant, connectez le nœud final à un nœud LLM ou agent dédié pour générer un message final ou un résumé pour l'utilisateur, en fournissant la fermeture à la conversation. +{% endtab%} + +{% tab title = "Pièges de potential"%} +** terminaison de conversation prématurée ** + +* ** Problème: ** Le nœud de fin est placé trop tôt dans le workflow, ce qui a fait fin la conversation avant la fin de toutes les étapes nécessaires ou que la demande de l'utilisateur est entièrement traitée. +* ** Exemple: ** Un chatbot conçu pour collecter les commentaires de l'utilisateur met fin à la conversation après que l'utilisateur a fourni son premier commentaire, sans lui donner l'occasion de fournir des commentaires supplémentaires ou de poser des questions. +* ** Solution: ** Passez en revue votre logique de workflow et assurez-vous que le nœud final est placé uniquement une fois toutes les étapes essentielles terminées ou que l'utilisateur a explicitement indiqué leur intention de mettre fin à la conversation. + +** Manque de fermeture pour l'utilisateur ** + +* ** Problème: ** La conversation se termine brusquement sans signal clair à l'utilisateur ou un message final qui donne un sentiment de fermeture. +* ** Exemple: ** Un chatbot de support client termine la conversation immédiatement après avoir résolu un problème, sans confirmer la résolution avec l'utilisateur ou offrir une aide supplémentaire. 
+* ** Solution: ** Connectez le nœud final à un nœud LLM ou agent dédicatoire pour générer une réponse finale qui résume la conversation, confirme toutes les actions prises et donne un sentiment de fermeture pour l'utilisateur. +{% endtab%} +{% endtabs%} + +*** + +## Nœud de condition vs nœud d'agent de condition + +Les nœuds d'agent de condition et de condition sont essentiels dans l'architecture d'agent séquentiel de Flowise pour créer des expériences conversationnelles dynamiques. + +Ces nœuds permettent des flux de travail adaptatifs, répondant à la saisie, au contexte et aux décisions complexes, mais diffèrent dans leur approche de l'évaluation et de la sophistication des conditions. + + + + Node de condition + +**But** + +Pour créer des branches basées sur des conditions logiques simples et prédéfinies. + +** Évaluation des conditions ** + +Utilise une interface basée sur une table ou un éditeur de code JavaScript pour définir les conditions qui sont vérifiées par rapport à l'état personnalisé et / ou l'historique complet de la conversation. + +** Comportement de sortie ** + +* Prend en charge plusieurs chemins de sortie, chacun associé à une condition spécifique. +* Les conditions sont évaluées dans l'ordre. La première condition de correspondance détermine la sortie. +* Si aucune condition n'est remplie, le débit suit une sortie par défaut "End". + +** le mieux adapté pour ** + +* Des décisions de routage simples basées sur des conditions facilement définissables. +* Workflows où la logique peut être exprimée à l'aide de comparaisons simples, de vérifications de mots clés ou de valeurs de variable d'état personnalisées. + + + + + + Condition d'agent nœud + +**But** + +Pour créer un routage dynamique basé sur l'analyse par un agent de la conversation et de sa sortie structurée. 
+ +** Évaluation des conditions ** + +* Si aucun modèle de chat n'est connecté, il utilise le System LLM par défaut (à partir du nœud de démarrage) pour traiter l'historique de conversation et tout état personnalisé. +* Il peut générer une sortie structurée, qui est ensuite utilisée pour l'évaluation des conditions. +* Utilise une interface basée sur une table ou un éditeur de code JavaScript pour définir des conditions qui sont vérifiées par rapport à la sortie de l'agent, structurées ou non. + +** Comportement de sortie ** + +Identique au nœud de condition: + +* Prend en charge plusieurs chemins de sortie, chacun associé à une condition spécifique. +* Les conditions sont évaluées dans l'ordre. La première condition de correspondance détermine la sortie. +* Si aucune condition n'est remplie, le flux suit la sortie par défaut "End". + +** le mieux adapté pour ** + +* Des décisions de routage plus complexes qui nécessitent une compréhension du contexte de la conversation, de l'intention des utilisateurs ou des facteurs nuancés. +* Scénarios où des conditions logiques simples sont insuffisantes pour capturer la logique de routage souhaitée. +* ** Exemple: ** Un chatbot doit déterminer si la question d'un utilisateur est liée à une catégorie de produit spécifique. Un nœud d'agent de condition peut être utilisé pour analyser la requête de l'utilisateur et sortir un objet JSON avec un champ "Catégorie". Le nœud d'agent de condition peut ensuite utiliser cette sortie structurée pour acheminer l'utilisateur vers le spécialiste du produit approprié. + + + +### Résumant + +
    Condition Node Condition Agent Node
    Logique de décision basée sur des conditions logiques prédéfinies.
    Basée sur des conditions logiques prédéfinies. Sortie.
    Implication de l'agent Aucun agent impliqué dans l'évaluation de la condition. utilise un agent pour traiter le contexte et générer une sortie pour les conditions. Encouragé pour l'évaluation fiable des conditions.
    Évaluation de la condition définir les conditions qui sont vérifiées par rapport à l'historique complet de la conversation. peut définir les conditions qui sont vérifiées contre la sortie de l'agent, structurée ou non.
    complexité Convient à la logique de ramification simple.
    Cases idéales à base d'utilisateurs Mots-clés dans la conversation.
    • Routing basé sur le sentiment, l'intention ou les facteurs contextuels complexes.
    Agent nœud llm nœud
    L'interaction de l'outil appelle directement les outils multiples, des outils intégrés, hitL. via le nœud d'outil, contrôle hitl granulaire au niveau de l'outil.
    Human-in-the-Loop (HITL) HITL contrôlé au niveau du nœud d'agent (tous les outils connectés affectés). HITL Géré au niveau du nœud d'outil individuel (plus La flexibilité).
    Sortie structurée s'appuie sur le format de sortie naturel du LLM. Sortie sur le format de sortie naturelle du LLM, mais, si nécessaire, fournit une définition de schéma JSON à la sortie de la structure. Cases
    • Workflows with complex tool orchestration.
    • Simplified HITL at the Agent Level.
    • Extracting structured data from LLM output
    • Workflows with complex LLM and tool interactions, requiring mixed HITL niveaux.