Commit f47b38b

Update demo URLs with cname (#153)
1 parent 18b39e6 commit f47b38b

9 files changed (+13 −10 lines)
README.md (+2 −2)

@@ -8,7 +8,7 @@ brings language model chats directly onto web browsers with hardware acceleratio
 **Everything runs inside the browser with no server support and accelerated with WebGPU.**
 We can bring a lot of fun opportunities to build AI assistants for everyone and enable privacy while enjoying GPU acceleration.
 
-**[Check out our demo webpage to try out!](https://mlc.ai/web-llm/)**
+**[Check out our demo webpage to try out!](https://webllm.mlc.ai/)**
 This project is a companion project of [MLC LLM](https://github.com/mlc-ai/mlc-llm),
 our companion project that runs LLMs natively on iPhone and other native local environments.
 

@@ -213,7 +213,7 @@ WebLLM package is a web runtime designed for [MLC LLM](https://github.com/mlc-ai
 
 ## Links
 
-- [Demo page](https://mlc.ai/web-llm/)
+- [Demo page](https://webllm.mlc.ai/)
 - If you want to run LLM on native runtime, check out [MLC-LLM](https://github.com/mlc-ai/mlc-llm)
 - You might also be interested in [Web Stable Diffusion](https://github.com/mlc-ai/web-stable-diffusion/).

examples/simple-chat/package.json (+1 −1)

@@ -5,7 +5,7 @@
   "scripts": {
     "start": "cp src/gh-config.js src/app-config.js && parcel src/llm_chat.html --port 8888",
     "mlc-local": "cp src/mlc-local-config.js src/app-config.js && parcel src/llm_chat.html --port 8888",
-    "build": "cp src/gh-config.js src/app-config.js && parcel build src/llm_chat.html --dist-dir lib --no-content-hash --public-url /web-llm"
+    "build": "cp src/gh-config.js src/app-config.js && parcel build src/llm_chat.html --dist-dir lib --no-content-hash"
   },
   "devDependencies": {
     "buffer": "^5.7.1",

examples/simple-chat/src/mlc-local-config.js (+1 −1)

@@ -11,7 +11,7 @@ export default {
     "local_id": "vicuna-v1-7b-q4f32_0"
   },
   // fp16 options are enabled through chrome canary flags
-  // chrome --enale-dawn-features=enable_unsafe_apis
+  // chrome --enable-dawn-features=enable_unsafe_apis
   {
     "model_url": "http://localhost:8000/RedPajama-INCITE-Chat-3B-v1-q4f16_0/params/",
     "local_id": "RedPajama-INCITE-Chat-3B-v1-q4f16_0",

package.json (+3 −1)

@@ -17,7 +17,9 @@
     "url": "git+https://github.com/mlc-ai/web-llm"
   },
   "keywords": [
-    "llm", "large language model", "machine learning"
+    "llm",
+    "large language model",
+    "machine learning"
   ],
   "license": "Apache-2.0",
   "homepage": "https://github.com/mlc-ai/web-llm",

scripts/gh_deploy_site.sh (+1)

@@ -15,6 +15,7 @@ rm -rf docs .gitignore
 mkdir -p docs
 cp -rf site/_site/* docs
 touch docs/.nojekyll
+echo "webllm.mlc.ai" >> docs/CNAME
 
 DATE=`date`
 git add docs && git commit -am "Build at ${DATE}"
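
How the new line takes effect: GitHub Pages reads a file named CNAME at the root of the published directory and uses its contents as the site's custom domain (the DNS record pointing webllm.mlc.ai at GitHub Pages is assumed to be configured separately; it is not part of this commit). Because the script rebuilds docs/ from scratch (`rm -rf docs` ... `mkdir -p docs` above), `>>` behaves the same as `>` here:

    # after a deploy run (illustrative):
    cat docs/CNAME
    # webllm.mlc.ai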

scripts/local_deploy_site.sh (+1 −1)

@@ -8,4 +8,4 @@ cd ../..
 
 cp examples/simple-chat/lib/* site
 
-cd site && jekyll serve --host localhost --baseurl /web-llm --port 8888
+cd site && jekyll serve --host localhost --port 8888

site/_config.yml (+2 −2)

@@ -2,7 +2,7 @@
 name: "Web LLM"
 short_name: "WebLLM"
 
-url: https://mlc.ai/web-llm
+url: https://webllm.mlc.ai
 
 exclude: [README.md, serve_local.sh]
 

@@ -24,7 +24,7 @@ permalink: /blog/:year/:month/:day/:title.html
 front_page_news: 8
 
 # Base pathname for links.
-base: '/web-llm'
+base: ''
 
 # make pages for the _projects folder
 collections:
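
This pairs with the `--baseurl` and `--public-url` removals above: with the site served from the domain root, the theme's `site.base` link prefix becomes empty, so a link built as, illustratively, `{{ site.base }}/blog/` now resolves to https://webllm.mlc.ai/blog/ instead of https://mlc.ai/web-llm/blog/.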

site/index.md (+1 −1)

@@ -53,7 +53,7 @@ The chat demo is based on [vicuna-7b-v1.1](https://huggingface.co/lmsys/vicuna-7
 ## Links
 
 - [Web LLM Github](https://github.com/mlc-ai/web-llm)
-- You might also be interested in [Web Stable Diffusion](https://mlc.ai/web-stable-diffusion/).
+- You might also be interested in [Web Stable Diffusion](https://websd.mlc.ai/).
 
 ## Disclaimer
 

src/chat_module.ts (+1 −1)

@@ -109,7 +109,7 @@ export class ChatModule implements ChatInterface {
       throw Error(
         "This model requires WebGPU extension shader-f16, " +
         "which is not enabled in this browser. " +
-        "You can try Chrome Canary with flag --enable-dawn-features=allow_unsafe_api"
+        "You can try Chrome Canary with flag --enable-dawn-features=allow_unsafe_apis"
       );
     }
     throw Error(
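
The corrected name matches Dawn's toggle, which is plural (`allow_unsafe_apis`). A sketch of the launch the error message suggests; the Canary binary path below is a macOS assumption and varies by platform:

    # macOS example (binary location is an assumption; adjust per platform):
    /Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary \
        --enable-dawn-features=allow_unsafe_apis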
