-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
127 lines (108 loc) · 5.48 KB
/
.env.example
File metadata and controls
127 lines (108 loc) · 5.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
# =============================================================================
# LLMephant Configuration
# =============================================================================
# Copy to .env and fill in values. Most settings have sensible defaults in
# settings.py — you only need to set what differs from your setup.
#
# For a basic local setup (Ollama on localhost), the defaults work out of the
# box. Just run: docker compose up --build
# =============================================================================
# -- Upstream LLM (required) --------------------------------------------------
# OpenAI-compatible base URL for your LLM server.
# Default: http://localhost:11434/v1 (Ollama)
# Examples:
# Ollama: http://localhost:11434/v1
# vLLM: http://localhost:8000/v1
# LM Studio: http://localhost:1234/v1
# lemonade: http://localhost:8000/api/v1
UPSTREAM_OPENAI_BASE=http://localhost:11434/v1
UPSTREAM_OPENAI_API_KEY=not-needed
# -- Audio transcription (optional) -------------------------------------------
# Falls back to UPSTREAM_OPENAI_BASE, so only set this if your Whisper service
# lives on a different host. Any OpenAI-compatible /v1/audio/transcriptions works.
# UPSTREAM_WHISPER_BASE=
# -- Vector store --------------------------------------------------------------
# Qdrant for memory storage. Defaults match the docker-compose setup.
QDRANT_URL=http://qdrant:6333
QDRANT_COLLECTION=memories
EMBEDDING_MODEL_NAME=sentence-transformers/all-MiniLM-L6-v2
# -- Memory --------------------------------------------------------------------
# Model used for memory extraction/distillation/verification.
MEMORY_MODEL_NAME=qwen2.5:14b-instruct
MEMORY_MIN_CONFIDENCE=0.9
MEMORY_DEDUPE_THRESHOLD=0.80
MEMORY_SIMILARITY_THRESHOLD=0.55
MEMORY_TTL_DAYS=365
ENABLE_MEMORY_EXTRACTION=true
# Token caps — reduce for lower latency, leave blank for backend defaults.
# MEMORY_EXTRACT_MAX_TOKENS=400
# MEMORY_DISTILL_MAX_TOKENS=800
# MEMORY_VERIFY_MAX_TOKENS=300
# MEMORY_REASONING_EFFORT=none
# -- Tools (MCP) ---------------------------------------------------------------
# TOOLING_CONFIG_FILE=./tooling_config.yml
# MAX_TOOL_ITERATIONS=10
# MAX_TOOL_RESULT_CHARS=40000
# -- API server ----------------------------------------------------------------
# These rarely need changing. docker-compose overrides HOST/PORT anyway.
# API_HOST=0.0.0.0
# API_PORT=8080
# DEFAULT_USER_ID=local-user
# LOG_COLORIZE=true
# LOG_LEVEL=INFO
# =============================================================================
# OpenWebUI Configuration
# =============================================================================
# These are read by the openwebui container via the shared .env file.
# Full reference: https://docs.openwebui.com/reference/env-configuration
# -- Required for LLMephant integration ----------------------------------------
# Forwards user identity to LLMephant so memories are per-user.
ENABLE_FORWARD_USER_INFO_HEADERS=true
# Required. OPENAI_API_BASE_URL points at the memory-api service by default,
# matching our Docker Compose stack.
ENABLE_OPENAI_API=true
OPENAI_API_BASE_URL=http://memory-api:8080/api/v1
OPENAI_API_KEY=not-needed
RAG_SYSTEM_CONTEXT=true
# -- Audio STT (optional) -----------------------------------------------------
# Point OpenWebUI's speech-to-text at LLMephant instead of its built-in Whisper.
# Requires your upstream LLM (or UPSTREAM_WHISPER_BASE) to serve Whisper.
# AUDIO_STT_ENGINE=openai
# AUDIO_STT_OPENAI_API_BASE_URL=http://memory-api:8080/api/v1
# AUDIO_STT_MODEL=whisper-1
# -- User management -----------------------------------------------------------
# DEFAULT_USER_ROLE: role assigned to new accounts (pending | user | admin).
# "pending" = new users need admin approval; "user" = immediate access.
DEFAULT_USER_ROLE=user
ENABLE_SIGNUP=true
ENABLE_LOGIN_FORM=true
# -- UI behavior ---------------------------------------------------------------
# ENABLE_FOLLOW_UP_GENERATION: auto-suggest follow-up questions (default: true)
# ENABLE_ADMIN_CHAT_ACCESS: let admins view other users' chats (default: true)
# -- Security ------------------------------------------------------------------
# WEBUI_SECRET_KEY: encryption key for JWT tokens and sensitive data.
# MUST be set to a stable random value in production — if it changes,
# all sessions (including OAuth) are invalidated.
WEBUI_SECRET_KEY=change-me-in-production
# WEBUI_URL: the public URL where OpenWebUI is accessible.
# Required for OAuth/SSO callback URLs.
# WEBUI_URL=http://localhost:3000
# WEBUI_PORT: the host port Docker publishes the OpenWebUI service on.
WEBUI_PORT=8080
# -- OAuth / SSO (optional) ----------------------------------------------------
# Uncomment and fill in to enable Google (or other OIDC) login.
# ENABLE_OAUTH_SIGNUP=true
# GOOGLE_CLIENT_ID=
# GOOGLE_CLIENT_SECRET=
# OPENID_PROVIDER_URL=https://accounts.google.com/.well-known/openid-configuration
# OAUTH_MERGE_ACCOUNTS_BY_EMAIL=true
# -- Vector DB ------------------------------------------------------------------
# Defaults to the Qdrant service in the Docker Compose stack. You can point
# these at another vector database, or comment them out entirely to fall back
# to OpenWebUI's built-in vector database.
VECTOR_DB=qdrant
QDRANT_URI=http://qdrant:6333
# =============================================================================
# Custom Environment Variables
# =============================================================================
# Tokens, secrets, etc. referenced in tooling_config.yml via ${VAR_NAME}.
# MCP_BEARER_TOKEN=your-token-here