diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..28dd30e
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,28 @@
+# Base image ships the prebuilt llama.cpp `main` binary at /main
+FROM ghcr.io/ggerganov/llama.cpp:light
+
+# Install Node.js 18 from the upstream tarball
+RUN apt-get update && apt-get install -y curl xz-utils
+RUN curl -O https://nodejs.org/dist/v18.16.0/node-v18.16.0-linux-x64.tar.xz && tar xf node-v18.16.0-linux-x64.tar.xz
+RUN mv node-v18.16.0-linux-x64/bin/* /usr/bin/ && mv node-v18.16.0-linux-x64/lib/* /usr/lib/ && mv node-v18.16.0-linux-x64/include/* /usr/include/
+
+# Recreate the llama.cpp directory layout and move the prebuilt binary into it
+WORKDIR /
+RUN mkdir llama.cpp
+
+WORKDIR /llama.cpp
+RUN mkdir models
+
+RUN mv /main /llama.cpp/
+WORKDIR /
+
+# Install the gpt-llama.cpp server and its dependencies
+COPY ./ /gpt-llama.cpp
+WORKDIR /gpt-llama.cpp
+
+RUN npm install
+EXPOSE 443
+
+ENTRYPOINT ["/bin/bash", "-c"]
+CMD ["npm start"]
+
diff --git a/README.md b/README.md
index a2cc27b..25388be 100644
--- a/README.md
+++ b/README.md
@@ -168,20 +168,52 @@ That's it!
 npm start mlock threads 8 ctx_size 1000 repeat_penalty 1 lora ../path/lora
 ```
 
+## Quickstart Installation (with docker)
+
+### Prerequisites
+
+Download an appropriate llama model, typically named something like `ggml-model-q4_0.bin`.
+
+### Steps
+
+1. Clone the repository:
+
+   ```bash
+   git clone https://github.com/keldenl/gpt-llama.cpp.git
+   ```
+
+2. Build the image:
+
+   ```bash
+   cd gpt-llama.cpp
+   docker build -t gpt-llama .
+   ```
+
+3. Run the image to start the server!
+
+   ```bash
+   docker run --name gpt-llama -v <model_path>:/llama.cpp/models -p <port>:443 gpt-llama
+   ```
+
+   Replace `<model_path>` with the directory where you placed the downloaded models, and `<port>` with the host port you want to expose, e.g. 8000.
+
 ## Usage
 
 1. To set up the GPT-powered app, there are 2 ways:
 
    - To use with a documented GPT-powered application, follow the [supported applications](https://github.com/keldenl/gpt-llama.cpp#Supported-applications) directions.
    - To use with an undocumented GPT-powered application, please do the following:
-     - Update the `openai_api_key` slot in the gpt-powered app to the absolute path of your local llama-based model (i.e. for mac, `"/Users/<username>/Documents/llama.cpp/models/vicuna/7B/ggml-vicuna-7b-4bit-rev1.bin"`).
+     - Update the `openai_api_key` slot in the gpt-powered app to the absolute path of your local llama-based model (e.g. for Mac, `"/Users/<username>/Documents/llama.cpp/models/vicuna/7B/ggml-vicuna-7b-4bit-rev1.bin"`). Note that for Docker usage, the path must start with `/llama.cpp/models`, like `"/llama.cpp/models/vicuna/7B/ggml-vicuna-7b-4bit-rev1.bin"`.
     - Change the `BASE_URL` for the OpenAI endpoint the app is calling to `localhost:443` or `localhost:443/v1`. This is sometimes provided in the `.env` file, or may require manually updating the app's OpenAI calls, depending on the specific application.
 
 2. Open another terminal window and test the installation by running the script below; make sure you have a llama `.bin` model file ready. Test the server by running the `test-installation` script:
 
    ```bash
-   # Mac
-   sh ./test-installion.sh
+   # Mac & Linux
+   bash ./test-installation.sh
+
+   # Mac & Linux with docker
+   ENV=docker bash ./test-installation.sh
    ```
 
 3. (Optional) Access the Swagger API docs at `http://localhost:443/docs` to test requests using the provided interface. Note that the authentication token needs to be set to the path of your local llama-based model (e.g. for Mac, `"/Users/<username>/Documents/llama.cpp/models/vicuna/7B/ggml-vicuna-7b-4bit-rev1.bin"`) for the requests to work properly.
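For reference, this is what step 3's `docker run` looks like with the placeholders filled in. A minimal sketch: the model directory `~/llama-models` and host port `8000` are hypothetical values, not part of the patch:

```bash
# Mount a local model directory at the path the container expects,
# and publish the server's internal port 443 on host port 8000.
docker run --name gpt-llama \
  -v ~/llama-models:/llama.cpp/models \
  -p 8000:443 \
  gpt-llama
```

The `-v` mount is what makes host-side model files visible at `/llama.cpp/models` inside the container, which is why the README tells Docker users to reference model paths under that prefix.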
diff --git a/test-installation.sh b/test-installation.sh
index 492709f..b033a30 100644
--- a/test-installation.sh
+++ b/test-installation.sh
@@ -8,7 +8,7 @@ port=${port:-443} # default port 443
 read -p "Please drag and drop the location of your Llama-based Model (.bin) here and press enter: " path
 
 # Check if the file exists
-if [ ! -f "$path" ]; then
+if [[ "$ENV" != "docker" && ! -f "$path" ]]; then
     echo "Error: The file does not exist. Please make sure you have provided the correct path."
     exit 1
 fi
@@ -59,4 +59,4 @@ else
     echo "Is the gpt-llama.cpp server running? Try starting the server and running this script again."
     echo "Make sure you are testing on the right port. The curl command's server error port should match the port in your gpt-llama.cpp window."
     echo "Please check for any errors in the terminal window running the gpt-llama.cpp server."
-fi
\ No newline at end of file
+fi
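A quick sketch of what the new guard changes, with hypothetical paths: when `ENV` is set to `docker`, the host-side file check is skipped, because the model path entered at the prompt only needs to exist inside the container, not on the host:

```bash
# Native run: the script still verifies the .bin file exists on the host,
# so entering a missing path prints the error and exits with status 1.
bash ./test-installation.sh

# Docker run: the existence check is bypassed, so a container-internal
# path such as /llama.cpp/models/7B/ggml-model-q4_0.bin is accepted as-is.
ENV=docker bash ./test-installation.sh
```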