diff --git a/packages/tasks/src/local-apps.ts b/packages/tasks/src/local-apps.ts
index d9ca9a5d0f..0f8bdf0b1e 100644
--- a/packages/tasks/src/local-apps.ts
+++ b/packages/tasks/src/local-apps.ts
@@ -90,6 +90,10 @@ function isLlamaCppGgufModel(model: ModelData) {
 	return !!model.gguf?.context_length;
 }
 
+function isLlamaFileModel(model: ModelData) {
+	return model.tags.includes("llamafile");
+}
+
 function isMlxModel(model: ModelData) {
 	return model.tags.includes("mlx");
 }
@@ -137,6 +141,37 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 	];
 };
 
+const snippetLlamafile = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+	const linuxAndMacCommand = () => {
+		const snippet = [
+			"# Load and run the model:",
+			`wget https://huggingface.co/${model.id}/resolve/main/${filepath ?? "{{LLAMAFILE_FILE}}"}`,
+			`chmod +x ${filepath ?? "{{LLAMAFILE_FILE}}"}`,
+			`./${filepath ?? "{{LLAMAFILE_FILE}}"} -p "Once upon a time,"`,
+		];
+		return snippet.join("\n");
+	};
+	const windowsCommand = () => {
+		const baseFilename = (filepath ?? "{{LLAMAFILE_FILE}}").split("/").pop()?.replace(".llamafile", ".exe");
+		const snippet = [
+			"# Load and run the model:",
+			`wget https://huggingface.co/${model.id}/resolve/main/${filepath ?? "{{LLAMAFILE_FILE}}"} -O ${baseFilename}`,
+			`./${baseFilename} -p "Once upon a time,"`,
+		];
+		return snippet.join("\n");
+	};
+	return [
+		{
+			title: "Linux and macOS",
+			content: linuxAndMacCommand(),
+		},
+		{
+			title: "Windows",
+			content: windowsCommand(),
+		},
+	];
+};
+
 const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
 	let tagName = "{{OLLAMA_TAG}}";
@@ -293,6 +328,13 @@ export const LOCAL_APPS = {
 		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetNodeLlamaCppCli,
 	},
+	llamafile: {
+		prettyLabel: "llamafile",
+		docsUrl: "https://github.com/Mozilla-Ocho/llamafile",
+		mainTask: "text-generation",
+		displayOnModelPage: isLlamaFileModel,
+		snippet: snippetLlamafile,
+	},
 	vllm: {
 		prettyLabel: "vLLM",
 		docsUrl: "https://docs.vllm.ai",
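
For reviewers: below is a minimal vitest-style sketch of how the new `llamafile` entry could be exercised. It is illustrative only; the model id `Mozilla/example-llamafile`, the file name `example.llamafile`, the test location, the import paths, and the assumption that `LocalAppSnippet` is exported are not part of this PR, and the `as ModelData` cast stands in for a fully populated fixture.

```ts
// Sketch only: exercises the llamafile entry added above with a made-up model.
import { describe, expect, it } from "vitest";

import { LOCAL_APPS } from "./local-apps";
import type { LocalAppSnippet } from "./local-apps";
import type { ModelData } from "./model-data";

describe("local-apps (llamafile)", () => {
	it("builds download-and-run commands for a llamafile-tagged model", () => {
		// Hypothetical model id and file name, used only for illustration.
		const model = { id: "Mozilla/example-llamafile", tags: ["llamafile"] } as ModelData;

		// Models carrying the "llamafile" tag should surface the llamafile local app.
		expect(LOCAL_APPS.llamafile.displayOnModelPage?.(model)).toBe(true);

		const snippets = LOCAL_APPS.llamafile.snippet(model, "example.llamafile") as LocalAppSnippet[];

		// Linux/macOS: download, mark executable, run.
		expect(snippets[0].title).toBe("Linux and macOS");
		expect(snippets[0].content).toContain(
			"wget https://huggingface.co/Mozilla/example-llamafile/resolve/main/example.llamafile"
		);
		expect(snippets[0].content).toContain("chmod +x example.llamafile");

		// Windows: the download is saved directly under a *.exe name.
		expect(snippets[1].title).toBe("Windows");
		expect(snippets[1].content).toContain("-O example.exe");
	});
});
```

The `-O example.exe` assertion mirrors the rename in `windowsCommand`: per the llamafile README, Windows only runs the binary once it carries a `.exe` extension, which is why the Windows snippet downloads straight to that name instead of using `chmod +x`.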