import * as webllm from "@mlc-ai/web-llm";

function setLabel(id: string, text: string) {
  const label = document.getElementById(id);
  if (label == null) {
    throw Error("Cannot find label " + id);
  }
  label.innerText = text;
}

// Helper method to stream responses from the engine
async function streamResponse(
  engine: webllm.MLCEngineInterface,
  request: webllm.ChatCompletionRequestStreaming,
): Promise<void> {
  console.log("Requesting chat completion with request:", request);
  const asyncChunkGenerator = await engine.chat.completions.create(request);
  let message = "";
  for await (const chunk of asyncChunkGenerator) {
    message += chunk.choices[0]?.delta?.content || "";
    setLabel("generate-label", message);
    if (chunk.usage) {
      console.log(chunk.usage); // only last chunk has usage
    }
    // engine.interruptGenerate(); // works with interrupt as well
  }
  console.log("Final message:\n", await engine.getMessage()); // the concatenated message
}
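
// For comparison, the same OpenAI-style API can be used without `stream`, in which
// case the full reply arrives in a single response object. A minimal sketch, kept
// commented out so it is not part of this example's flow:
//
//   const reply = await engine.chat.completions.create({
//     messages: [{ role: "user", content: "How many r's are there in the word strawberry?" }],
//   });
//   console.log(reply.choices[0].message.content); // full reply text
//   console.log(reply.usage);                      // token usage for the call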

/**
 * We demonstrate how Qwen3's best practices can be followed in WebLLM. For more, see
 * https://huggingface.co/Qwen/Qwen3-8B#best-practices.
 */
async function main() {
  const initProgressCallback = (report: webllm.InitProgressReport) => {
    setLabel("init-label", report.text);
  };
  const selectedModel = "Qwen3-4B-q4f16_1-MLC";
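  // Other Qwen3 variants may be available as well; the exact model IDs shipped with
  // a given WebLLM release are listed in webllm.prebuiltAppConfig.model_list.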
  const engine: webllm.MLCEngineInterface = await webllm.CreateMLCEngine(
    selectedModel,
    { initProgressCallback: initProgressCallback },
  );

  /**
   * 1. Default behavior: enable thinking
   */
  let request: webllm.ChatCompletionRequest = {
    stream: true,
    stream_options: { include_usage: true },
    messages: [
      {
        role: "user",
        content: "How many r's are there in the word strawberry?",
      },
    ],
    // Specifying `enable_thinking` is optional; thinking is enabled by default.
    // extra_body: {
    //   enable_thinking: true,
    // }
  };
  await streamResponse(engine, request);

  /**
   * 2. Disable thinking with `enable_thinking: false`.
   */
  request = {
    stream: true,
    stream_options: { include_usage: true },
    messages: [
      {
        role: "user",
        content: "How many r's are there in the word strawberry?",
      },
    ],
    extra_body: {
      enable_thinking: false,
    },
  };
  await streamResponse(engine, request);

  /**
   * 3. Disable thinking with the soft switch /no_think,
   * or enable thinking with the soft switch /think.
   * Per the best practices linked above: "When enable_thinking=True, regardless of
   * whether the user uses /think or /no_think, the model will always output a block
   * wrapped in <think>...</think>. However, the content inside this block may be
   * empty if thinking is disabled. When enable_thinking=False, the soft switches are
   * not valid. Regardless of any /think or /no_think tags input by the user, the
   * model will not generate think content and will not include a <think>...</think> block."
   */
  request = {
    stream: true,
    stream_options: { include_usage: true },
    messages: [
      {
        role: "user",
        content: "How many r's are there in the word strawberry? /no_think",
        // content: "How many r's are there in the word strawberry? /think",
      },
    ],
  };
  await streamResponse(engine, request);

  /**
   * 4. For multi-turn messages, it is recommended to
   * parse out the thinking content in the history
   * messages as described in the Best Practices section.
   */
  const history: webllm.ChatCompletionMessageParam[] = [
    {
      role: "user",
      content: "How many r's are there in the word strawberry? /think",
    },
    {
      role: "assistant",
      content:
        "<think>Dummy thinking content here...</think>\n\nThe answer is 3.",
    },
  ];
  // Preprocess history to remove thinking content
  const preprocessedHistory = history.map((msg) => {
    if (msg.role === "assistant") {
      // Remove the <think>...</think> block at the start of assistant messages,
      // along with the trailing \n\n line breaks, if present.
      const thinkRegex = /<think>.*?<\/think>\n?\n?/s; // Match <think>...</think> with optional \n\n
      const contentWithoutThink = msg.content!.replace(thinkRegex, "").trim();
      return { ...msg, content: contentWithoutThink };
    }
    return msg; // User messages remain unchanged
  });
  console.log("Preprocessed history:", preprocessedHistory);
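
  // In a longer conversation, the same stripping would be applied to every earlier
  // assistant turn before the history is resent, so previously generated reasoning
  // is not fed back to the model (as recommended in the Best Practices section).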

  // Now use the preprocessed history in the request
  const newMessage: webllm.ChatCompletionMessageParam = {
    role: "user",
    content: "What about blueberries?",
  };

  request = {
    stream: true,
    stream_options: { include_usage: true },
    messages: [...preprocessedHistory, newMessage],
  };
  await streamResponse(engine, request);
}

main();