diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 59016e787d..2133263a33 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -97,7 +97,7 @@ Pre-requisite: You should use the Intellij IDE, which can be downloaded [here](h 2. Run `scripts/install-dependencies.sh` or `scripts/install-dependencies.ps1` on Windows. This will install and build all of the necessary dependencies. 3. To test the plugin, select the "Run Plugin" Gradle configuration and click the "Run" or "Debug" button as shown in this screenshot: ![img](./media/IntelliJRunPluginScreenshot.png) -4. To package the extension, choose the "Build Plugin" Gradle configuration. This will generate a .zip file in `extensions/intellij/build/distributions` with the version defined in `extensions/intellij/gradle.properties`. +4. To package the extension, run `./gradlew build` (or `./gradlew.bat build` on Windows) from the `extensions/intellij` directory. This will generate a .zip file in `extensions/intellij/build/distributions` with the version defined in `extensions/intellij/gradle.properties`. 5. If you make changes, you may need to re-build before running the "Build Plugin" configuration a. If you change code from the `core` or `binary` directories, make sure to run `npm run build` from the `binary` directory to create a new binary. 
diff --git a/core/autocomplete/templates.ts b/core/autocomplete/templates.ts index 06e74dbed0..af753cae57 100644 --- a/core/autocomplete/templates.ts +++ b/core/autocomplete/templates.ts @@ -24,6 +24,13 @@ const stableCodeFimTemplate: AutocompleteTemplate = { }, }; +const codestralFimTemplate: AutocompleteTemplate = { + template: "[SUFFIX]{{{suffix}}}[PREFIX]{{{prefix}}}", + completionOptions: { + stop: ["[PREFIX]", "[SUFFIX]"], + }, +}; + const codegemmaFimTemplate: AutocompleteTemplate = { template: "<|fim_prefix|>{{{prefix}}}<|fim_suffix|>{{{suffix}}}<|fim_middle|>", @@ -227,6 +234,10 @@ export function getTemplateForModel(model: string): AutocompleteTemplate { return stableCodeFimTemplate; } + if (lowerCaseModel.includes("codestral")) { + return codestralFimTemplate; + } + if (lowerCaseModel.includes("codegemma")) { return codegemmaFimTemplate; } diff --git a/core/index.d.ts b/core/index.d.ts index 29c95145b9..a182794491 100644 --- a/core/index.d.ts +++ b/core/index.d.ts @@ -304,6 +304,9 @@ export interface LLMOptions { useLegacyCompletionsEndpoint?: boolean; + // Cloudflare options + accountId?: string; + // Azure options engine?: string; apiVersion?: string; @@ -539,7 +542,8 @@ type ModelProvider = | "groq" | "continue-proxy" | "fireworks" - | "custom"; + | "custom" + | "cloudflare"; export type ModelName = | "AUTODETECT" diff --git a/core/llm/index.ts b/core/llm/index.ts index 1be64dc4f6..1ad5945184 100644 --- a/core/llm/index.ts +++ b/core/llm/index.ts @@ -99,6 +99,7 @@ export abstract class BaseLLM implements ILLM { apiType?: string; region?: string; projectId?: string; + accountId?: string; private _llmOptions: LLMOptions; @@ -150,6 +151,7 @@ export abstract class BaseLLM implements ILLM { if (this.apiBase && !this.apiBase.endsWith("/")) { this.apiBase = `${this.apiBase}/`; } + this.accountId = options.accountId; this.engine = options.engine; this.apiVersion = options.apiVersion; diff --git a/core/llm/llms/Cloudflare.ts b/core/llm/llms/Cloudflare.ts new 
file mode 100644 index 0000000000..ff2c11564d --- /dev/null +++ b/core/llm/llms/Cloudflare.ts @@ -0,0 +1,61 @@ +import { BaseLLM } from "../index.js"; +import { ChatMessage, CompletionOptions, ModelProvider } from "../../index.js"; +import { stripImages } from "../countTokens.js"; +import { streamSse } from "../stream.js"; + +export default class Cloudflare extends BaseLLM { + static providerName: ModelProvider = "cloudflare"; + + private _convertArgs(options: CompletionOptions) { + const finalOptions = { + max_tokens: options.maxTokens, + }; + + return finalOptions; + } + + protected async *_streamChat( + messages: ChatMessage[], + options: CompletionOptions, + ): AsyncGenerator { + const headers = { + "Content-Type": "application/json", + Authorization: `Bearer ${this.apiKey}`, + ...this.requestOptions?.headers, + }; + + const resp = await this.fetch( + new URL( + `https://api.cloudflare.com/client/v4/accounts/${this.accountId}/ai/v1/chat/completions`, + ), + { + method: "POST", + headers, + body: JSON.stringify({ + messages, + stream: true, + model: this.model, + ...this._convertArgs(options), + }), + }, + ); + + for await (const value of streamSse(resp)) { + if (value.choices?.[0]?.delta?.content) { + yield value.choices[0].delta; + } + } + } + + protected async *_streamComplete( + prompt: string, + options: CompletionOptions, + ): AsyncGenerator { + for await (const chunk of this._streamChat( + [{ role: "user", content: prompt }], + options, + )) { + yield stripImages(chunk.content); + } + } +} diff --git a/core/llm/llms/index.ts b/core/llm/llms/index.ts index 33165263bd..6cc6752194 100644 --- a/core/llm/llms/index.ts +++ b/core/llm/llms/index.ts @@ -29,6 +29,7 @@ import Replicate from "./Replicate.js"; import TextGenWebUI from "./TextGenWebUI.js"; import Together from "./Together.js"; import ContinueProxy from "./stubs/ContinueProxy.js"; +import Cloudflare from "./Cloudflare.js"; function convertToLetter(num: number): string { let result =
""; @@ -99,6 +100,7 @@ const LLMs = [ Groq, Fireworks, ContinueProxy, + Cloudflare, ]; export async function llmFromDescription( diff --git a/docs/docs/reference/Model Providers/cloudflare.md b/docs/docs/reference/Model Providers/cloudflare.md new file mode 100644 index 0000000000..749472a45d --- /dev/null +++ b/docs/docs/reference/Model Providers/cloudflare.md @@ -0,0 +1,45 @@ +# Cloudflare Workers AI + +Cloudflare Workers AI can be used for both chat and tab autocompletion in Continue. To set up Cloudflare Workers AI, add the following to your `config.json` file: + +```json title="~/.continue/config.json" +{ + "models": [ + { + "accountId": "YOUR CLOUDFLARE ACCOUNT ID", + "apiKey": "YOUR CLOUDFLARE API KEY", + "contextLength": 2400, + "completionOptions": { + "maxTokens": 500 + }, + "model": "@cf/meta/llama-3-8b-instruct", // This can be the name of any model supported by Workers AI + "provider": "cloudflare", + "title": "Llama 3 8B" + }, + { + "accountId": "YOUR CLOUDFLARE ACCOUNT ID", + "apiKey": "YOUR CLOUDFLARE API KEY", + "contextLength": 2400, + "completionOptions": { + "maxTokens": 500 + }, + "model": "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", + "provider": "cloudflare", + "title": "DeepSeek Coder 6.7b Instruct" + } + ], + "tabAutocompleteModel": { + "accountId": "YOUR CLOUDFLARE ACCOUNT ID", + "apiKey": "YOUR CLOUDFLARE API KEY", + "model": "@hf/thebloke/deepseek-coder-6.7b-base-awq", + "provider": "cloudflare", + "title": "DeepSeek 7b" + } +} +``` + +Visit the [Cloudflare dashboard](https://dash.cloudflare.com/) to [create an API key](https://developers.cloudflare.com/fundamentals/api/get-started/create-token/).
+ +Review [available models](https://developers.cloudflare.com/workers-ai/models/) on Workers AI + +[View the source](https://github.com/continuedev/continue/blob/main/core/llm/llms/Cloudflare.ts) diff --git a/docs/docs/walkthroughs/set-up-codestral.md b/docs/docs/walkthroughs/set-up-codestral.md index da2ed46cfb..1f82ca85ce 100644 --- a/docs/docs/walkthroughs/set-up-codestral.md +++ b/docs/docs/walkthroughs/set-up-codestral.md @@ -14,7 +14,7 @@ keywords: [codestral, mistral, model setup] 2. Click on the gear icon in the bottom right corner of the Continue window to open `~/.continue/config.json` (MacOS) / `%userprofile%\.continue\config.json` (Windows) -3. Log in and create an API key on Mistral AI's La Plateforme [here](https://console.mistral.ai/codestral) +3. Log in and create an API key on Mistral AI's La Plateforme [here](https://console.mistral.ai/codestral). Make sure you get an API key from the "Codestral" page, as an API key for the normal "api.mistral.ai" API will not work. 4. To use Codestral as your model for both `autocomplete` and `chat`, replace `[API_KEY]` with your Mistral API key below and add it to your `config.json` file: diff --git a/docs/static/schemas/config.json b/docs/static/schemas/config.json index 1c8b3fc3ba..ac61c321af 100644 --- a/docs/static/schemas/config.json +++ b/docs/static/schemas/config.json @@ -150,7 +150,8 @@ "flowise", "groq", "fireworks", - "continue-proxy" + "continue-proxy", + "cloudflare" ], "markdownEnumDescriptions": [ "### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)", @@ -169,7 +170,8 @@ "### Llamafile\nTo get started with llamafiles, find and download a binary on their [GitHub repo](https://github.com/Mozilla-Ocho/llamafile#binary-instructions). 
Then run it with the following command:\n\n```shell\nchmod +x ./llamafile\n./llamafile\n```\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/llamafile)", "### Mistral API\n\nTo get access to the Mistral API, obtain your API key from the [Mistral platform](https://docs.mistral.ai/)", "### DeepInfra\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/deepinfra)", - "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models." + "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models.", + "### Cloudflare Workers AI\n\n[Reference](https://developers.cloudflare.com/workers-ai/)" ], "type": "string" }, @@ -313,7 +315,8 @@ "gemini", "huggingface-inference-api", "replicate", - "together" + "together", + "cloudflare" ] } }, @@ -359,6 +362,64 @@ } } }, + { + "if": { + "properties": { + "provider": { + "enum": ["cloudflare"] + } + }, + "required": ["provider"] + }, + "then": { + "properties": { + "accountId": { + "type": "string" + }, + "model": { + "anyOf": [ + { + "enum": [ + "@cf/meta/llama-3-8b-instruct", + "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", + "@cf/deepseek-ai/deepseek-math-7b-instruct", + "@cf/thebloke/discolm-german-7b-v1-awq", + "@cf/tiiuae/falcon-7b-instruct", + "@cf/google/gemma-2b-it-lora", + "@hf/google/gemma-7b-it", + "@cf/google/gemma-7b-it-lora", + "@hf/nousresearch/hermes-2-pro-mistral-7b", + "@cf/meta/llama-2-7b-chat-fp16", + "@cf/meta/llama-2-7b-chat-int8", + "@cf/meta-llama/llama-2-7b-chat-hf-lora", + "@hf/thebloke/llama-2-13b-chat-awq", + "@hf/thebloke/llamaguard-7b-awq", + "@cf/mistral/mistral-7b-instruct-v0.1", + "@hf/mistral/mistral-7b-instruct-v0.2", + "@cf/mistral/mistral-7b-instruct-v0.2-lora", + "@hf/thebloke/neural-chat-7b-v3-1-awq", + "@cf/openchat/openchat-3.5-0106", + "@hf/thebloke/openhermes-2.5-mistral-7b-awq", + "@cf/microsoft/phi-2", + "@cf/qwen/qwen1.5-0.5b-chat", + 
"@cf/qwen/qwen1.5-1.8b-chat", + "@cf/qwen/qwen1.5-7b-chat-awq", + "@cf/qwen/qwen1.5-14b-chat-awq", + "@cf/defog/sqlcoder-7b-2", + "@hf/nexusflow/starling-lm-7b-beta", + "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", + "@hf/thebloke/zephyr-7b-beta-awq", + "@hf/thebloke/deepseek-coder-6.7b-base-awq" + ] + }, + { + "type": "string" + } + ] + } + } + } + }, { "if": { "properties": { diff --git a/extensions/intellij/src/main/resources/config_schema.json b/extensions/intellij/src/main/resources/config_schema.json index 1c8b3fc3ba..ac61c321af 100644 --- a/extensions/intellij/src/main/resources/config_schema.json +++ b/extensions/intellij/src/main/resources/config_schema.json @@ -150,7 +150,8 @@ "flowise", "groq", "fireworks", - "continue-proxy" + "continue-proxy", + "cloudflare" ], "markdownEnumDescriptions": [ "### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)", @@ -169,7 +170,8 @@ "### Llamafile\nTo get started with llamafiles, find and download a binary on their [GitHub repo](https://github.com/Mozilla-Ocho/llamafile#binary-instructions). Then run it with the following command:\n\n```shell\nchmod +x ./llamafile\n./llamafile\n```\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/llamafile)", "### Mistral API\n\nTo get access to the Mistral API, obtain your API key from the [Mistral platform](https://docs.mistral.ai/)", "### DeepInfra\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/deepinfra)", - "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models." 
+ "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models.", + "### Cloudflare Workers AI\n\n[Reference](https://developers.cloudflare.com/workers-ai/)" ], "type": "string" }, @@ -313,7 +315,8 @@ "gemini", "huggingface-inference-api", "replicate", - "together" + "together", + "cloudflare" ] } }, @@ -359,6 +362,64 @@ } } }, + { + "if": { + "properties": { + "provider": { + "enum": ["cloudflare"] + } + }, + "required": ["provider"] + }, + "then": { + "properties": { + "accountId": { + "type": "string" + }, + "model": { + "anyOf": [ + { + "enum": [ + "@cf/meta/llama-3-8b-instruct", + "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", + "@cf/deepseek-ai/deepseek-math-7b-instruct", + "@cf/thebloke/discolm-german-7b-v1-awq", + "@cf/tiiuae/falcon-7b-instruct", + "@cf/google/gemma-2b-it-lora", + "@hf/google/gemma-7b-it", + "@cf/google/gemma-7b-it-lora", + "@hf/nousresearch/hermes-2-pro-mistral-7b", + "@cf/meta/llama-2-7b-chat-fp16", + "@cf/meta/llama-2-7b-chat-int8", + "@cf/meta-llama/llama-2-7b-chat-hf-lora", + "@hf/thebloke/llama-2-13b-chat-awq", + "@hf/thebloke/llamaguard-7b-awq", + "@cf/mistral/mistral-7b-instruct-v0.1", + "@hf/mistral/mistral-7b-instruct-v0.2", + "@cf/mistral/mistral-7b-instruct-v0.2-lora", + "@hf/thebloke/neural-chat-7b-v3-1-awq", + "@cf/openchat/openchat-3.5-0106", + "@hf/thebloke/openhermes-2.5-mistral-7b-awq", + "@cf/microsoft/phi-2", + "@cf/qwen/qwen1.5-0.5b-chat", + "@cf/qwen/qwen1.5-1.8b-chat", + "@cf/qwen/qwen1.5-7b-chat-awq", + "@cf/qwen/qwen1.5-14b-chat-awq", + "@cf/defog/sqlcoder-7b-2", + "@hf/nexusflow/starling-lm-7b-beta", + "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", + "@hf/thebloke/zephyr-7b-beta-awq", + "@hf/thebloke/deepseek-coder-6.7b-base-awq" + ] + }, + { + "type": "string" + } + ] + } + } + } + }, { "if": { "properties": { diff --git a/extensions/intellij/src/test/kotlin/com/github/continuedev/continueintellijextension/MyPluginTest.kt 
b/extensions/intellij/src/test/kotlin/com/github/continuedev/continueintellijextension/MyPluginTest.kt index 0e3bf10bfc..09eb8a95f3 100644 --- a/extensions/intellij/src/test/kotlin/com/github/continuedev/continueintellijextension/MyPluginTest.kt +++ b/extensions/intellij/src/test/kotlin/com/github/continuedev/continueintellijextension/MyPluginTest.kt @@ -26,10 +26,6 @@ class MyPluginTest : BasePlatformTestCase() { } } - fun testRename() { - myFixture.testRename("foo.xml", "foo_after.xml", "a2") - } - fun testProjectService() { val projectService = project.service() diff --git a/extensions/vscode/config_schema.json b/extensions/vscode/config_schema.json index 1c8b3fc3ba..ac61c321af 100644 --- a/extensions/vscode/config_schema.json +++ b/extensions/vscode/config_schema.json @@ -150,7 +150,8 @@ "flowise", "groq", "fireworks", - "continue-proxy" + "continue-proxy", + "cloudflare" ], "markdownEnumDescriptions": [ "### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)", @@ -169,7 +170,8 @@ "### Llamafile\nTo get started with llamafiles, find and download a binary on their [GitHub repo](https://github.com/Mozilla-Ocho/llamafile#binary-instructions). Then run it with the following command:\n\n```shell\nchmod +x ./llamafile\n./llamafile\n```\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/llamafile)", "### Mistral API\n\nTo get access to the Mistral API, obtain your API key from the [Mistral platform](https://docs.mistral.ai/)", "### DeepInfra\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/deepinfra)", - "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models." 
+ "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models.", + "### Cloudflare Workers AI\n\n[Reference](https://developers.cloudflare.com/workers-ai/)" ], "type": "string" }, @@ -313,7 +315,8 @@ "gemini", "huggingface-inference-api", "replicate", - "together" + "together", + "cloudflare" ] } }, @@ -359,6 +362,64 @@ } } }, + { + "if": { + "properties": { + "provider": { + "enum": ["cloudflare"] + } + }, + "required": ["provider"] + }, + "then": { + "properties": { + "accountId": { + "type": "string" + }, + "model": { + "anyOf": [ + { + "enum": [ + "@cf/meta/llama-3-8b-instruct", + "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", + "@cf/deepseek-ai/deepseek-math-7b-instruct", + "@cf/thebloke/discolm-german-7b-v1-awq", + "@cf/tiiuae/falcon-7b-instruct", + "@cf/google/gemma-2b-it-lora", + "@hf/google/gemma-7b-it", + "@cf/google/gemma-7b-it-lora", + "@hf/nousresearch/hermes-2-pro-mistral-7b", + "@cf/meta/llama-2-7b-chat-fp16", + "@cf/meta/llama-2-7b-chat-int8", + "@cf/meta-llama/llama-2-7b-chat-hf-lora", + "@hf/thebloke/llama-2-13b-chat-awq", + "@hf/thebloke/llamaguard-7b-awq", + "@cf/mistral/mistral-7b-instruct-v0.1", + "@hf/mistral/mistral-7b-instruct-v0.2", + "@cf/mistral/mistral-7b-instruct-v0.2-lora", + "@hf/thebloke/neural-chat-7b-v3-1-awq", + "@cf/openchat/openchat-3.5-0106", + "@hf/thebloke/openhermes-2.5-mistral-7b-awq", + "@cf/microsoft/phi-2", + "@cf/qwen/qwen1.5-0.5b-chat", + "@cf/qwen/qwen1.5-1.8b-chat", + "@cf/qwen/qwen1.5-7b-chat-awq", + "@cf/qwen/qwen1.5-14b-chat-awq", + "@cf/defog/sqlcoder-7b-2", + "@hf/nexusflow/starling-lm-7b-beta", + "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", + "@hf/thebloke/zephyr-7b-beta-awq", + "@hf/thebloke/deepseek-coder-6.7b-base-awq" + ] + }, + { + "type": "string" + } + ] + } + } + } + }, { "if": { "properties": { diff --git a/extensions/vscode/continue_rc_schema.json b/extensions/vscode/continue_rc_schema.json index c79eeeee6e..fcbd553581 100644 --- 
a/extensions/vscode/continue_rc_schema.json +++ b/extensions/vscode/continue_rc_schema.json @@ -150,7 +150,8 @@ "flowise", "groq", "fireworks", - "continue-proxy" + "continue-proxy", + "cloudflare" ], "markdownEnumDescriptions": [ "### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)", @@ -169,7 +170,8 @@ "### Llamafile\nTo get started with llamafiles, find and download a binary on their [GitHub repo](https://github.com/Mozilla-Ocho/llamafile#binary-instructions). Then run it with the following command:\n\n```shell\nchmod +x ./llamafile\n./llamafile\n```\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/llamafile)", "### Mistral API\n\nTo get access to the Mistral API, obtain your API key from the [Mistral platform](https://docs.mistral.ai/)", "### DeepInfra\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/deepinfra)", - "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models." 
+ "### Continue Proxy\n\nContinue Enterprise users can use the Continue Proxy as a single point of access to models.", + "### Cloudflare Workers AI\n\n[Reference](https://developers.cloudflare.com/workers-ai/)" ], "type": "string" }, @@ -268,10 +270,7 @@ "apiType": { "title": "Api Type", "markdownDescription": "OpenAI API type, either `openai` or `azure`", - "enum": [ - "openai", - "azure" - ] + "enum": ["openai", "azure"] }, "apiVersion": { "title": "Api Version", @@ -284,11 +283,7 @@ "type": "string" } }, - "required": [ - "title", - "provider", - "model" - ], + "required": ["title", "provider", "model"], "allOf": [ { "if": { @@ -298,9 +293,7 @@ } }, "not": { - "required": [ - "provider" - ] + "required": ["provider"] } }, "then": { @@ -322,52 +315,38 @@ "gemini", "huggingface-inference-api", "replicate", - "together" + "together", + "cloudflare" ] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { - "required": [ - "apiKey" - ] + "required": ["apiKey"] } }, { "if": { "properties": { "provider": { - "enum": [ - "huggingface-tgi", - "huggingface-inference-api" - ] + "enum": ["huggingface-tgi", "huggingface-inference-api"] } } }, "then": { - "required": [ - "apiBase" - ] + "required": ["apiBase"] }, - "required": [ - "provider" - ] + "required": ["provider"] }, { "if": { "properties": { "provider": { - "enum": [ - "openai" - ] + "enum": ["openai"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -387,14 +366,68 @@ "if": { "properties": { "provider": { - "enum": [ - "openai" + "enum": ["cloudflare"] + } + }, + "required": ["provider"] + }, + "then": { + "properties": { + "accountId": { + "type": "string" + }, + "model": { + "anyOf": [ + { + "enum": [ + "@cf/meta/llama-3-8b-instruct", + "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", + "@cf/deepseek-ai/deepseek-math-7b-instruct", + "@cf/thebloke/discolm-german-7b-v1-awq", + "@cf/tiiuae/falcon-7b-instruct", + "@cf/google/gemma-2b-it-lora", + 
"@hf/google/gemma-7b-it", + "@cf/google/gemma-7b-it-lora", + "@hf/nousresearch/hermes-2-pro-mistral-7b", + "@cf/meta/llama-2-7b-chat-fp16", + "@cf/meta/llama-2-7b-chat-int8", + "@cf/meta-llama/llama-2-7b-chat-hf-lora", + "@hf/thebloke/llama-2-13b-chat-awq", + "@hf/thebloke/llamaguard-7b-awq", + "@cf/mistral/mistral-7b-instruct-v0.1", + "@hf/mistral/mistral-7b-instruct-v0.2", + "@cf/mistral/mistral-7b-instruct-v0.2-lora", + "@hf/thebloke/neural-chat-7b-v3-1-awq", + "@cf/openchat/openchat-3.5-0106", + "@hf/thebloke/openhermes-2.5-mistral-7b-awq", + "@cf/microsoft/phi-2", + "@cf/qwen/qwen1.5-0.5b-chat", + "@cf/qwen/qwen1.5-1.8b-chat", + "@cf/qwen/qwen1.5-7b-chat-awq", + "@cf/qwen/qwen1.5-14b-chat-awq", + "@cf/defog/sqlcoder-7b-2", + "@hf/nexusflow/starling-lm-7b-beta", + "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", + "@hf/thebloke/zephyr-7b-beta-awq", + "@hf/thebloke/deepseek-coder-6.7b-base-awq" + ] + }, + { + "type": "string" + } ] } + } + } + }, + { + "if": { + "properties": { + "provider": { + "enum": ["openai"] + } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -425,14 +458,10 @@ "if": { "properties": { "provider": { - "enum": [ - "replicate" - ] + "enum": ["replicate"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -462,14 +491,10 @@ "if": { "properties": { "provider": { - "enum": [ - "free-trial" - ] + "enum": ["free-trial"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -494,9 +519,7 @@ "if": { "properties": { "provider": { - "enum": [ - "openai" - ] + "enum": ["openai"] }, "apiType": { "not": { @@ -504,9 +527,7 @@ } } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -564,14 +585,10 @@ "if": { "properties": { "provider": { - "enum": [ - "anthropic" - ] + "enum": ["anthropic"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ 
-599,22 +616,15 @@ "if": { "properties": { "provider": { - "enum": [ - "cohere" - ] + "enum": ["cohere"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { "model": { - "enum": [ - "command-r", - "command-r-plus" - ] + "enum": ["command-r", "command-r-plus"] } } } @@ -623,14 +633,10 @@ "if": { "properties": { "provider": { - "enum": [ - "bedrock" - ] + "enum": ["bedrock"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -655,14 +661,10 @@ "if": { "properties": { "provider": { - "enum": [ - "gemini" - ] + "enum": ["gemini"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -681,14 +683,10 @@ "if": { "properties": { "provider": { - "enum": [ - "together" - ] + "enum": ["together"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -722,14 +720,10 @@ "if": { "properties": { "provider": { - "enum": [ - "deepinfra" - ] + "enum": ["deepinfra"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -752,9 +746,7 @@ ] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -800,14 +792,10 @@ "if": { "properties": { "provider": { - "enum": [ - "ollama" - ] + "enum": ["ollama"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -855,14 +843,10 @@ "if": { "properties": { "provider": { - "enum": [ - "mistral" - ] + "enum": ["mistral"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -883,14 +867,10 @@ "if": { "properties": { "provider": { - "enum": [ - "groq" - ] + "enum": ["groq"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -911,21 +891,15 @@ "if": { "properties": { "provider": { - "enum": [ - "fireworks" - ] + "enum": ["fireworks"] } }, - "required": [ - "provider" - ] 
+ "required": ["provider"] }, "then": { "properties": { "model": { - "enum": [ - "starcoder-7b" - ] + "enum": ["starcoder-7b"] } } } @@ -937,30 +911,20 @@ "const": "azure" } }, - "required": [ - "apiType" - ] + "required": ["apiType"] }, "then": { - "required": [ - "engine", - "apiVersion", - "apiBase" - ] + "required": ["engine", "apiVersion", "apiBase"] } }, { "if": { "properties": { "provider": { - "enum": [ - "openai" - ] + "enum": ["openai"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -977,9 +941,7 @@ "const": "llamafile" } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -993,14 +955,10 @@ "if": { "properties": { "provider": { - "enum": [ - "text-gen-webui" - ] + "enum": ["text-gen-webui"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -1014,14 +972,10 @@ "if": { "properties": { "provider": { - "enum": [ - "flowise" - ] + "enum": ["flowise"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { "properties": { @@ -1048,10 +1002,7 @@ "type": "string" } }, - "required": [ - "key", - "value" - ] + "required": ["key", "value"] } }, "additionalFlowiseConfiguration": { @@ -1070,10 +1021,7 @@ "description": "Configuration Property value" } }, - "required": [ - "key", - "value" - ] + "required": ["key", "value"] } }, "model": { @@ -1163,9 +1111,7 @@ "type": "string" } }, - "required": [ - "default" - ] + "required": ["default"] }, "SlashCommand": { "title": "SlashCommand", @@ -1224,9 +1170,7 @@ "if": { "properties": { "name": { - "enum": [ - "issue" - ] + "enum": ["issue"] } } }, @@ -1239,23 +1183,17 @@ "description": "Enter the URL of the GitHub repository for which you want to generate the issue." 
} }, - "required": [ - "repositoryUrl" - ] + "required": ["repositoryUrl"] } }, - "required": [ - "params" - ] + "required": ["params"] } }, { "if": { "properties": { "name": { - "enum": [ - "edit" - ] + "enum": ["edit"] } } }, @@ -1280,9 +1218,7 @@ "if": { "properties": { "name": { - "enum": [ - "share" - ] + "enum": ["share"] } } }, @@ -1300,10 +1236,7 @@ } } ], - "required": [ - "name", - "description" - ] + "required": ["name", "description"] }, "CustomCommand": { "title": "CustomCommand", @@ -1322,11 +1255,7 @@ "type": "string" } }, - "required": [ - "name", - "prompt", - "description" - ] + "required": ["name", "prompt", "description"] }, "ContextProviderWithParams": { "title": "ContextProviderWithParams", @@ -1399,9 +1328,7 @@ "if": { "properties": { "name": { - "enum": [ - "google" - ] + "enum": ["google"] } } }, @@ -1414,23 +1341,17 @@ "description": "Your API key for https://serper.dev in order to get Google search results" } }, - "required": [ - "serperApiKey" - ] + "required": ["serperApiKey"] } }, - "required": [ - "params" - ] + "required": ["params"] } }, { "if": { "properties": { "name": { - "enum": [ - "open" - ] + "enum": ["open"] } } }, @@ -1452,9 +1373,7 @@ "if": { "properties": { "name": { - "enum": [ - "issue" - ] + "enum": ["issue"] } } }, @@ -1483,37 +1402,24 @@ "type": { "type": "string", "description": "The type of issues to search for", - "enum": [ - "open", - "closed", - "all" - ] + "enum": ["open", "closed", "all"] } }, - "required": [ - "owner", - "repo" - ] + "required": ["owner", "repo"] } } }, - "required": [ - "repos" - ] + "required": ["repos"] } }, - "required": [ - "params" - ] + "required": ["params"] } }, { "if": { "properties": { "name": { - "enum": [ - "database" - ] + "enum": ["database"] } } }, @@ -1531,11 +1437,7 @@ "connection_type": { "type": "string", "description": "The type of database (e.g., 'postgres', 'mysql')", - "enum": [ - "postgres", - "mysql", - "sqlite" - ] + "enum": ["postgres", "mysql", "sqlite"] }, 
"connection": { "type": "object", @@ -1568,25 +1470,17 @@ "required": [] } }, - "required": [ - "name", - "type", - "connection" - ] + "required": ["name", "type", "connection"] } }, - "required": [ - "connections" - ] + "required": ["connections"] } }, { "if": { "properties": { "name": { - "enum": [ - "gitlab-mr" - ] + "enum": ["gitlab-mr"] } } }, @@ -1607,23 +1501,17 @@ "description": "If you have code selected, filters out comments that aren't related to the selection." } }, - "required": [ - "token" - ] + "required": ["token"] } }, - "required": [ - "params" - ] + "required": ["params"] } }, { "if": { "properties": { "name": { - "enum": [ - "jira" - ] + "enum": ["jira"] } } }, @@ -1669,24 +1557,17 @@ ] } }, - "required": [ - "domain", - "token" - ] + "required": ["domain", "token"] } }, - "required": [ - "params" - ] + "required": ["params"] } }, { "if": { "properties": { "name": { - "enum": [ - "http" - ] + "enum": ["http"] } } }, @@ -1699,24 +1580,17 @@ "description": "The HTTP endpoint of your context provider server." 
} }, - "required": [ - "url" - ] + "required": ["url"] } }, - "required": [ - "params" - ] + "required": ["params"] } }, { "if": { "properties": { "name": { - "enum": [ - "codebase", - "folder" - ] + "enum": ["codebase", "folder"] } } }, @@ -1751,9 +1625,7 @@ "if": { "properties": { "name": { - "enum": [ - "postgres" - ] + "enum": ["postgres"] } } }, @@ -1805,19 +1677,11 @@ } } }, - "required": [ - "host", - "port", - "user", - "password", - "database" - ] + "required": ["host", "port", "user", "password", "database"] } } ], - "required": [ - "name" - ] + "required": ["name"] }, "SerializedContinueConfig": { "title": "config.json", @@ -1968,46 +1832,32 @@ "$ref": "#/definitions/RequestOptions" } }, - "required": [ - "provider" - ], + "required": ["provider"], "allOf": [ { "if": { "properties": { "provider": { - "enum": [ - "ollama" - ] + "enum": ["ollama"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { - "required": [ - "model" - ] + "required": ["model"] } }, { "if": { "properties": { "provider": { - "enum": [ - "cohere" - ] + "enum": ["cohere"] } }, - "required": [ - "provider" - ] + "required": ["provider"] }, "then": { - "required": [ - "apiKey" - ] + "required": ["apiKey"] } } ] @@ -2018,33 +1868,22 @@ "type": "object", "properties": { "name": { - "enum": [ - "cohere", - "voyage", - "llm", - "free-trial" - ] + "enum": ["cohere", "voyage", "llm", "free-trial"] }, "params": { "type": "object" } }, - "required": [ - "name" - ], + "required": ["name"], "allOf": [ { "if": { "properties": { "name": { - "enum": [ - "cohere" - ] + "enum": ["cohere"] } }, - "required": [ - "name" - ] + "required": ["name"] }, "then": { "properties": { @@ -2066,9 +1905,7 @@ "type": "string" } }, - "required": [ - "apiKey" - ] + "required": ["apiKey"] } } } @@ -2077,14 +1914,10 @@ "if": { "properties": { "name": { - "enum": [ - "llm" - ] + "enum": ["llm"] } }, - "required": [ - "name" - ] + "required": ["name"] }, "then": { "properties": { @@ -2095,9 
+1928,7 @@ "type": "string" } }, - "required": [ - "modelTitle" - ] + "required": ["modelTitle"] } } } @@ -2106,14 +1937,10 @@ "if": { "properties": { "name": { - "enum": [ - "voyage" - ] + "enum": ["voyage"] } }, - "required": [ - "name" - ] + "required": ["name"] }, "then": { "properties": { @@ -2124,14 +1951,10 @@ "type": "string" }, "model": { - "enum": [ - "rerank-lite-1" - ] + "enum": ["rerank-lite-1"] } }, - "required": [ - "apiKey" - ] + "required": ["apiKey"] } } } @@ -2187,11 +2010,7 @@ "description": "An optional template string to be used for autocomplete. It will be rendered with the Mustache templating language, and is passed the 'prefix' and 'suffix' variables." }, "multilineCompletions": { - "enum": [ - "always", - "never", - "auto" - ], + "enum": ["always", "never", "auto"], "description": "If set to true, Continue will only complete a single line at a time." }, "useCache": { @@ -2220,10 +2039,7 @@ "type": "object", "properties": { "codeBlockToolbarPosition": { - "enum": [ - "top", - "bottom" - ], + "enum": ["top", "bottom"], "default": "top", "description": "Whether to show the copy and apply code buttons at the top or bottom of code blocks in the sidebar." }, @@ -2240,9 +2056,7 @@ "defaultContext": { "type": "array", "items": { - "enum": [ - "activeFile" - ] + "enum": ["activeFile"] } }, "modelRoles": { @@ -2282,10 +2096,7 @@ }, "mergeBehavior": { "type": "string", - "enum": [ - "merge", - "overwrite" - ], + "enum": ["merge", "overwrite"], "default": "merge", "title": "Merge behavior", "markdownDescription": "If set to 'merge', .continuerc.json will be applied on top of config.json (arrays and objects are merged). If set to 'overwrite', then every top-level property of .continuerc.json will overwrite that property from config.json." 
@@ -2293,4 +2104,4 @@ } } } -} \ No newline at end of file +} diff --git a/extensions/vscode/package-lock.json b/extensions/vscode/package-lock.json index cd566958b5..f6a494148c 100644 --- a/extensions/vscode/package-lock.json +++ b/extensions/vscode/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.9.140", + "version": "0.9.141", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "continue", - "version": "0.9.140", + "version": "0.9.141", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extensions/vscode/package.json b/extensions/vscode/package.json index 7730fd09b7..672a7902b9 100644 --- a/extensions/vscode/package.json +++ b/extensions/vscode/package.json @@ -1,7 +1,7 @@ { "name": "continue", "icon": "media/icon.png", - "version": "0.9.140", + "version": "0.9.141", "repository": { "type": "git", "url": "https://github.com/continuedev/continue"