# docker-compose.yml — local development stack (forked from simstudioai/sim)
# 98 lines (93 loc) · 2.4 KB
# Obsolete in Compose v2 (ignored with a warning); kept for legacy
# docker-compose v1 compatibility.
version: '3.8'

services:
  # Next.js app built from the local Dockerfile, served on port 3000.
  simstudio:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    volumes:
      - ./sim:/app
      # Anonymous volumes so the bind mount above does not shadow the
      # node_modules and .next directories built inside the image.
      - /app/node_modules
      - /app/.next
    environment:
      - NODE_ENV=development
      - DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio
      - POSTGRES_URL=postgresql://postgres:postgres@db:5432/simstudio
      - BETTER_AUTH_URL=http://localhost:3000
      - NEXT_PUBLIC_APP_URL=http://localhost:3000
      # NOTE(review): development placeholders — replace the two secrets below
      # before any non-local deployment.
      - BETTER_AUTH_SECRET=your_auth_secret_here
      - ENCRYPTION_KEY=your_encryption_key_here
      - FREESTYLE_API_KEY=placeholder
      - GOOGLE_CLIENT_ID=placeholder
      - GOOGLE_CLIENT_SECRET=placeholder
      - GITHUB_CLIENT_ID=placeholder
      - GITHUB_CLIENT_SECRET=placeholder
      - RESEND_API_KEY=placeholder
    depends_on:
      db:
        # Wait for the Postgres healthcheck, not just container start.
        condition: service_healthy

  # PostgreSQL 16 backing database for the app above.
  db:
    image: postgres:16
    restart: always
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DB=simstudio
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5

  # Optional Ollama service with NVIDIA GPU passthrough.
  # Enable with: docker compose --profile local-gpu up
  local-llm-gpu:
    profiles:
      - local-gpu
    image: ollama/ollama:latest
    pull_policy: always
    volumes:
      # Share the host's model cache so models are not re-downloaded.
      - ${HOME}/.ollama:/root/.ollama
    ports:
      - "11434:11434"
    environment:
      - NVIDIA_DRIVER_CAPABILITIES=all
      - OLLAMA_LOAD_TIMEOUT=-1
      - OLLAMA_KEEP_ALIVE=-1
      - OLLAMA_DEBUG=1
    command: "serve"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      # NOTE(review): this assumes curl is available inside ollama/ollama —
      # verify, or switch to a tool known to be present in the image.
      test: ["CMD", "curl", "-f", "http://localhost:11434/"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Optional CPU-only Ollama service (same port — do not enable both profiles
  # at once, as the published port 11434 would conflict).
  # Enable with: docker compose --profile local-cpu up
  local-llm-cpu:
    profiles:
      - local-cpu
    image: ollama/ollama:latest
    pull_policy: always
    volumes:
      - ${HOME}/.ollama:/root/.ollama
    ports:
      - "11434:11434"
    environment:
      - OLLAMA_LOAD_TIMEOUT=-1
      - OLLAMA_KEEP_ALIVE=-1
      - OLLAMA_DEBUG=1
    command: "serve"
    healthcheck:
      # NOTE(review): assumes curl exists in the image — see note above on
      # the GPU variant of this healthcheck.
      test: ["CMD", "curl", "-f", "http://localhost:11434/"]
      interval: 10s
      timeout: 5s
      retries: 5

volumes:
  # Named volume persisting Postgres data across container recreation.
  postgres_data: