-
Notifications
You must be signed in to change notification settings - Fork 9
Expand file tree
/
Copy pathserver.py
More file actions
1827 lines (1549 loc) · 69.9 KB
/
server.py
File metadata and controls
1827 lines (1549 loc) · 69.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
"""
OpenVoiceUI Server — Entry Point
Initialises the Flask application and registers all route blueprints.
Routes are split into focused blueprints under routes/; this file handles
startup wiring, session management, usage tracking, and standalone endpoints
that don't belong to a specific feature blueprint.
Start:
venv/bin/python3 server.py
See README.md for full setup instructions.
"""
import asyncio
import base64
import faulthandler
import json
import logging
import os
import queue
import re
import requests
import shutil
import signal
import sqlite3
import subprocess
import tempfile
import threading
import time
import uuid
from datetime import datetime
from pathlib import Path
import websockets
from dotenv import load_dotenv
from flask import Response, request, jsonify
faulthandler.enable()  # print traceback on hard crashes (SIGSEGV etc.)

# Load environment variables before anything else — later imports read os.environ.
env_path = Path(__file__).parent / ".env"
load_dotenv(env_path, override=True)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Wall-clock start, used for uptime in /api/version and /api/diagnostics.
SERVER_START_TIME = time.time()

# ---------------------------------------------------------------------------
# Faster-Whisper — lazy-loaded on first /api/stt/local request
# ---------------------------------------------------------------------------
# Module-level singleton; populated by get_whisper_model() on first use.
_whisper_model = None
def get_whisper_model():
    """Return the shared Faster-Whisper model, loading it lazily on first call.

    Raises:
        ImportError: when the optional faster-whisper package is missing,
            with install instructions in the message.
    """
    global _whisper_model
    if _whisper_model is not None:
        return _whisper_model
    try:
        from faster_whisper import WhisperModel
    except ImportError:
        raise ImportError(
            "Local STT requires faster-whisper. Install it with: "
            "pip install faster-whisper"
        )
    logger.info("Loading Faster-Whisper model (first STT request)...")
    _whisper_model = WhisperModel("tiny", device="cpu", compute_type="float32")
    logger.info("Faster-Whisper model ready.")
    return _whisper_model
# ---------------------------------------------------------------------------
# Flask app factory + blueprint registration
# ---------------------------------------------------------------------------
from app import create_app

# create_app() returns the Flask app plus the flask-sock WebSocket extension.
app, sock = create_app()

from routes.music import music_bp
app.register_blueprint(music_bp)

# Canvas exposes both its blueprint and a set of helpers/constants that other
# startup code below (seeding, manifest sync) needs directly.
from routes.canvas import (
    canvas_bp,
    canvas_context,
    update_canvas_context,
    extract_canvas_page_content,
    get_canvas_context,
    load_canvas_manifest,
    save_canvas_manifest,
    add_page_to_manifest,
    sync_canvas_manifest,
    CANVAS_MANIFEST_PATH,
    CANVAS_PAGES_DIR,
    CATEGORY_ICONS,
    CATEGORY_COLORS,
)
app.register_blueprint(canvas_bp)

# Seed default pages into canvas-pages on startup (ships with the app image)
# Default pages are app infrastructure (e.g. desktop menu) — auth is skipped in canvas.py.
# Pages with a <!-- openvoiceui-version: X --> comment are re-seeded when version changes.
from services.paths import DEFAULT_PAGES_DIR
_VERSION_RE = re.compile(r'<!--\s*openvoiceui-version:\s*(\S+)\s*-->')
def _extract_page_version(path, max_lines=5):
"""Read version stamp from the first few lines of an HTML file."""
try:
with open(path, 'r', encoding='utf-8') as f:
for _, line in zip(range(max_lines), f):
m = _VERSION_RE.search(line)
if m:
return m.group(1)
except (OSError, UnicodeDecodeError):
pass
return None
# Copy shipped default pages into the runtime canvas-pages directory.
# New files are seeded once; stamped files are re-seeded on version change.
if DEFAULT_PAGES_DIR.is_dir():
    CANVAS_PAGES_DIR.mkdir(parents=True, exist_ok=True)
    for src in DEFAULT_PAGES_DIR.iterdir():
        if not src.is_file():
            continue  # skip subdirectories and other non-file entries
        dest = CANVAS_PAGES_DIR / src.name
        if not dest.exists():
            shutil.copy2(src, dest)
            logger.info("Seeded default page: %s", src.name)
        else:
            # Re-seed if the shipped version is newer than the runtime copy
            src_ver = _extract_page_version(src)
            if src_ver is not None:
                dest_ver = _extract_page_version(dest)
                # Unstamped shipped pages (src_ver is None) are never overwritten,
                # preserving any local edits to them.
                if dest_ver != src_ver:
                    shutil.copy2(src, dest)
                    logger.info(
                        "Re-seeded default page %s (version %s -> %s)",
                        src.name, dest_ver, src_ver,
                    )
# --- Remaining feature blueprints -----------------------------------------
# Each route module exposes one blueprint; imports are kept adjacent to the
# register call so a feature can be disabled by commenting out two lines.
from routes.static_files import static_files_bp, DJ_SOUNDS, SOUNDS_DIR
app.register_blueprint(static_files_bp)
from routes.admin import admin_bp
app.register_blueprint(admin_bp)
from routes.theme import theme_bp
app.register_blueprint(theme_bp)
from routes.conversation import conversation_bp, clean_for_tts
app.register_blueprint(conversation_bp)
from routes.profiles import profiles_bp
app.register_blueprint(profiles_bp)
from routes.elevenlabs_hybrid import elevenlabs_hybrid_bp
app.register_blueprint(elevenlabs_hybrid_bp)
from routes.instructions import instructions_bp
app.register_blueprint(instructions_bp)
from routes.greetings import greetings_bp
app.register_blueprint(greetings_bp)
from routes.suno import suno_bp
app.register_blueprint(suno_bp)
from routes.airadio_bridge import airadio_bp
app.register_blueprint(airadio_bp)
from routes.vision import vision_bp
app.register_blueprint(vision_bp)
from routes.transcripts import transcripts_bp
app.register_blueprint(transcripts_bp)
from routes.pi import pi_bp
app.register_blueprint(pi_bp)
from routes.onboarding import onboarding_bp
app.register_blueprint(onboarding_bp)
from routes.image_gen import image_gen_bp
app.register_blueprint(image_gen_bp)
from routes.chat import chat_bp
app.register_blueprint(chat_bp)
from routes.workspace import workspace_bp
app.register_blueprint(workspace_bp)
from routes.icons import icons_bp
from routes.report_issue import report_issue_bp
app.register_blueprint(icons_bp)
app.register_blueprint(report_issue_bp)
from routes.registry import registry_bp
app.register_blueprint(registry_bp)
from routes.chatgpt_import import chatgpt_import_bp
app.register_blueprint(chatgpt_import_bp)

# Plugin system — auto-discover and load installed plugins
from routes.plugins import plugins_bp
app.register_blueprint(plugins_bp)
from routes.custom_faces import custom_faces_bp
app.register_blueprint(custom_faces_bp)
from routes.vault import vault_bp
app.register_blueprint(vault_bp)
from routes.identity import identity_bp
app.register_blueprint(identity_bp)
from services.plugins import load_plugins
load_plugins(app)

# Auto-sync canvas manifest on startup so any pages written outside the API
# are picked up immediately without a restart.
try:
    sync_canvas_manifest()
    logger.info("Canvas manifest synced on startup.")
except Exception as _e:
    # Best-effort: a failed sync must not prevent the server from starting.
    logger.warning(f"Canvas manifest auto-sync failed (non-critical): {_e}")

# Start canvas page version watcher (auto-saves versions when pages change)
try:
    from services.canvas_versioning import start_version_watcher
    start_version_watcher()
    logger.info("Canvas version watcher started.")
except Exception as _e:
    logger.warning(f"Canvas version watcher failed to start (non-critical): {_e}")
# ---------------------------------------------------------------------------
# Voice session management
# ---------------------------------------------------------------------------
from services.paths import VOICE_SESSION_FILE as _VSF_PATH, DB_PATH, UPLOADS_DIR

# File persisting the integer counter behind the rolling session key.
VOICE_SESSION_FILE = Path(_VSF_PATH)
# Consecutive empty LLM responses for the current session; reported by
# /api/session and reset to 0 by bump_voice_session().
_consecutive_empty_responses = 0
def _save_session_counter(counter: int) -> None:
    """Persist *counter* to VOICE_SESSION_FILE, overwriting any previous value."""
    VOICE_SESSION_FILE.write_text(str(counter))
def get_voice_session_key() -> str:
    """Return the current voice session key, e.g. 'voice-main-6'."""
    prefix = os.getenv("VOICE_SESSION_PREFIX", "voice-main")
    try:
        current = int(VOICE_SESSION_FILE.read_text().strip())
    except (FileNotFoundError, ValueError):
        # Missing or corrupt counter file: initialise it to 1.
        current = 1
        _save_session_counter(current)
    return "{}-{}".format(prefix, current)
def bump_voice_session() -> str:
    """Increment the session counter, reset the empty-response tally, and
    return the new session key."""
    global _consecutive_empty_responses
    prefix = os.getenv("VOICE_SESSION_PREFIX", "voice-main")
    try:
        previous = int(VOICE_SESSION_FILE.read_text().strip())
    except (FileNotFoundError, ValueError):
        previous = 1
    bumped = previous + 1
    _save_session_counter(bumped)
    _consecutive_empty_responses = 0
    new_key = f"{prefix}-{bumped}"
    logger.info(f"Session bumped → {new_key}")
    return new_key
# ---------------------------------------------------------------------------
# User usage tracking (SQLite)
# ---------------------------------------------------------------------------
# Monthly message cap per user (env-overridable).
MONTHLY_LIMIT = int(os.getenv("MONTHLY_USAGE_LIMIT", "20"))
# Comma-separated user ids exempt from the monthly cap.
UNLIMITED_USERS: list = [
    u.strip() for u in os.getenv("UNLIMITED_USER_IDS", "").split(",") if u.strip()
]

from services.db_pool import SQLitePool

# Shared connection pool. NOTE(review): the usage helpers below open raw
# sqlite3 connections instead of using this pool — presumably the pool serves
# other call sites; verify before consolidating.
db_pool = SQLitePool(DB_PATH, pool_size=5)
def init_db() -> None:
    """Create the usage and conversation tables if they do not exist.

    Enables WAL journal mode so concurrent readers don't block the writer.
    Idempotent (all DDL uses IF NOT EXISTS), so it is safe to call on every
    startup.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.execute("PRAGMA journal_mode=WAL")
        c = conn.cursor()
        # Per-user monthly message counters.
        c.execute("""
            CREATE TABLE IF NOT EXISTS usage (
                user_id TEXT PRIMARY KEY,
                message_count INTEGER DEFAULT 0,
                month TEXT,
                updated_at TEXT
            )
        """)
        # Raw conversation transcript, one row per utterance.
        c.execute("""
            CREATE TABLE IF NOT EXISTS conversation_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT DEFAULT 'default',
                role TEXT NOT NULL,
                message TEXT NOT NULL,
                tts_provider TEXT,
                voice TEXT,
                created_at TEXT
            )
        """)
        # Per-turn timing/outcome metrics surfaced by /api/diagnostics.
        c.execute("""
            CREATE TABLE IF NOT EXISTS conversation_metrics (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT DEFAULT 'default',
                profile TEXT,
                model TEXT,
                handshake_ms INTEGER,
                llm_inference_ms INTEGER,
                tts_generation_ms INTEGER,
                total_ms INTEGER,
                user_message_len INTEGER,
                response_len INTEGER,
                tts_text_len INTEGER,
                tts_provider TEXT,
                tts_success INTEGER DEFAULT 1,
                tts_error TEXT,
                tool_count INTEGER DEFAULT 0,
                fallback_used INTEGER DEFAULT 0,
                error TEXT,
                created_at TEXT
            )
        """)
        conn.commit()
    finally:
        # Close even when a statement raises so the handle isn't leaked
        # (previously a failure here left the connection open).
        conn.close()
def get_current_month() -> str:
    """Return the current month bucket as 'YYYY-MM' (used for usage resets)."""
    today = datetime.now()
    return f"{today.year:04d}-{today.month:02d}"
def get_user_usage(user_id: str) -> int:
    """Return *user_id*'s message count for the current month.

    Returns 0 for unknown users and for rows left over from a previous
    month (they are reset lazily by increment_usage()).
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        c = conn.cursor()
        c.execute("SELECT message_count, month FROM usage WHERE user_id = ?", (user_id,))
        row = c.fetchone()
    finally:
        # Guarantee the connection is released even if the query raises
        # (previously an exception here leaked the handle).
        conn.close()
    if row:
        count, month = row
        # A row from a previous month is stale — treat it as zero usage.
        return count if month == get_current_month() else 0
    return 0
def increment_usage(user_id: str) -> None:
    """Record one message for *user_id* in the current month.

    Restarts the counter at 1 when the stored row belongs to a previous
    month; inserts a fresh row for first-time users.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        c = conn.cursor()
        current_month = get_current_month()
        now = datetime.now().isoformat()
        c.execute("SELECT month FROM usage WHERE user_id = ?", (user_id,))
        row = c.fetchone()
        if row:
            if row[0] != current_month:
                # Month rolled over: reset the count rather than accumulate.
                c.execute(
                    "UPDATE usage SET message_count = 1, month = ?, updated_at = ? WHERE user_id = ?",
                    (current_month, now, user_id),
                )
            else:
                c.execute(
                    "UPDATE usage SET message_count = message_count + 1, updated_at = ? WHERE user_id = ?",
                    (now, user_id),
                )
        else:
            c.execute(
                "INSERT INTO usage (user_id, message_count, month, updated_at) VALUES (?, 1, ?, ?)",
                (user_id, current_month, now),
            )
        conn.commit()
    finally:
        # Release the handle even when a statement raises (was leaked before).
        conn.close()
# Create tables at import time so the first request never races table creation.
init_db()

# ---------------------------------------------------------------------------
# Upload directory
# ---------------------------------------------------------------------------
UPLOADS_DIR.mkdir(parents=True, exist_ok=True)

# ---------------------------------------------------------------------------
# Routes — index
# ---------------------------------------------------------------------------
@app.route("/")
def serve_index():
"""Serve index.html with injected runtime config.
Set AGENT_SERVER_URL in .env to override the backend URL the frontend
connects to. Defaults to window.location.origin (correct for same-origin
deployments).
"""
import pathlib
html = pathlib.Path("index.html").read_text()
server_url = os.environ.get("AGENT_SERVER_URL", "").strip().rstrip("/")
clerk_key = (os.environ.get("CLERK_PUBLISHABLE_KEY") or os.environ.get("VITE_CLERK_PUBLISHABLE_KEY", "")).strip()
client_name = os.environ.get("CLIENT_NAME", "").strip()
import json as _json
devsite_map_raw = os.environ.get("DEVSITE_MAP", "{}").strip()
try:
devsite_map = _json.loads(devsite_map_raw)
except Exception:
devsite_map = {}
config_parts = []
config_parts.append(f'serverUrl:"{server_url}"' if server_url else 'serverUrl:window.location.origin')
if clerk_key:
config_parts.append(f'clerkPublishableKey:"{clerk_key}"')
if devsite_map:
config_parts.append(f'devsiteMap:{_json.dumps(devsite_map)}')
if client_name:
config_parts.append(f'clientName:{_json.dumps(client_name)}')
config_parts.append('managedUpdates:true')
config_block = f'<script>window.AGENT_CONFIG={{{",".join(config_parts)}}};</script>'
html = html.replace("<head>", f"<head>\n {config_block}", 1)
# Replace PWA title and apple-mobile-web-app-title with client name
if client_name:
html = html.replace("<title>OpenVoiceUI</title>", f"<title>{client_name}</title>")
html = html.replace('content="OpenVoiceUI"', f'content="{client_name}"')
resp = Response(html, mimetype="text/html")
resp.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
return resp
# ---------------------------------------------------------------------------
# Routes — version info
# ---------------------------------------------------------------------------
# Build metadata written by the build pipeline; placeholders when absent.
_VERSION_INFO = {"commit": "unknown", "branch": "unknown", "date": "unknown"}
_version_file = Path(__file__).parent / "version.json"
if _version_file.exists():
    try:
        _VERSION_INFO = json.loads(_version_file.read_text())
    except Exception:
        pass  # corrupt version.json — keep the "unknown" placeholders

# Read package.json version as authoritative version string
_PACKAGE_VERSION = "unknown"
_package_file = Path(__file__).parent / "package.json"
if _package_file.exists():
    try:
        _PACKAGE_VERSION = json.loads(_package_file.read_text()).get("version", "unknown")
    except Exception:
        pass
_VERSION_INFO["version"] = _PACKAGE_VERSION
@app.route("/api/version", methods=["GET"])
def get_version():
"""Return build version and check GitHub for latest release."""
data = {**_VERSION_INFO, "uptime_seconds": round(time.time() - SERVER_START_TIME)}
# Check for newer release on GitHub (cached, non-blocking)
latest = _get_latest_release_info()
if latest:
data["latest_commit"] = latest["sha"]
data["latest_date"] = latest["date"]
data["latest_message"] = latest["message"]
data["latest_version"] = latest.get("latest_version", "")
# Check if we need an update. Three strategies:
# 1. git merge-base (if .git exists) — handles being ahead of release
# 2. Commit SHA comparison — direct match
# 3. package.json version vs release tag — fallback when commit unknown
current = _VERSION_INFO.get("commit", "unknown")
data["update_available"] = False
if current != "unknown" and latest.get("sha"):
app_dir = Path(__file__).parent
if (app_dir / ".git").is_dir():
try:
result = subprocess.run(
["git", "merge-base", "--is-ancestor",
latest["sha"], "HEAD"],
cwd=str(app_dir), capture_output=True, timeout=5,
)
# returncode 0 = release is ancestor of HEAD (up to date)
# returncode 1 = release is NOT ancestor (need update)
data["update_available"] = result.returncode != 0
except Exception:
data["update_available"] = not latest["sha"].startswith(current)
else:
data["update_available"] = not latest["sha"].startswith(current)
elif current == "unknown" and latest.get("latest_version"):
# Commit unknown (Docker build without args, etc.)
# Fall back to comparing package.json version against release tag.
pkg_ver = _PACKAGE_VERSION
rel_tag = latest["latest_version"]
if pkg_ver != "unknown" and rel_tag:
rel_ver = rel_tag.lstrip("v")
data["update_available"] = rel_ver.split("-")[0] != pkg_ver.split("-")[0] or rel_ver > pkg_ver
return jsonify(data)
# Cache GitHub check for 5 minutes to avoid hammering the API
_github_cache = {"data": None, "expires": 0}
_GITHUB_REPO = os.environ.get("GITHUB_REPO", "MCERQUA/OpenVoiceUI").strip()

def _get_latest_release_info():
    """Fetch the latest GitHub release and its commit SHA.

    Only compares against tagged releases — not every commit on main.
    This prevents false "update available" banners from routine merges.

    Returns a dict {sha, date, message, latest_version}, the previous cached
    dict on API failure, or None when nothing has ever been fetched.
    """
    now = time.time()
    if _github_cache["data"] and now < _github_cache["expires"]:
        return _github_cache["data"]
    try:
        # Get the latest release
        rel = requests.get(
            f"https://api.github.com/repos/{_GITHUB_REPO}/releases/latest",
            headers={"Accept": "application/vnd.github.v3+json"},
            timeout=5,
        )
        if rel.status_code != 200:
            # Serve stale data (possibly None) rather than raising.
            return _github_cache.get("data")
        rel_data = rel.json()
        tag_name = rel_data.get("tag_name", "")
        # Get the commit SHA for this release tag
        tag_resp = requests.get(
            f"https://api.github.com/repos/{_GITHUB_REPO}/git/ref/tags/{tag_name}",
            headers={"Accept": "application/vnd.github.v3+json"},
            timeout=5,
        )
        sha = ""
        if tag_resp.status_code == 200:
            tag_obj = tag_resp.json().get("object", {})
            # Short 7-char SHA to match the build stamp in version.json.
            sha = tag_obj.get("sha", "")[:7]
        result = {
            "sha": sha,
            "date": rel_data.get("published_at", ""),
            "message": rel_data.get("name", tag_name),
            "latest_version": tag_name,
        }
        _github_cache["data"] = result
        _github_cache["expires"] = now + 300  # 5 min cache
        return result
    except Exception:
        pass  # network error — fall through to stale data below
    return _github_cache.get("data")
def _self_update():
    """Intelligent update: analyse → detect agent → review → apply → verify.

    Instead of a blind ``git pull``, this:
    1. Analyses what changed upstream vs what's customised locally
    2. Searches for an available CLI coding agent (claude, codex, z-code, etc.)
    3. If an agent is found AND there are conflicts → spawns it with a
       comprehensive prompt that enumerates every possible breaking point
    4. If no agent or low risk → uses heuristic-based smart update with
       automatic backup/rollback
    5. Verifies health after every update; rolls back on failure

    Works in all deployment scenarios: native, Docker, multi-tenant, Pinokio.
    """
    from services.update_manager import UpdateManager
    root_dir = Path(__file__).parent
    return UpdateManager(root_dir).apply_update()
@app.route("/api/version/preview", methods=["GET"])
def preview_update():
"""Return a preview of what an update would change, without applying it.
The frontend can call this to show the user:
- What files will change
- What local customisations exist
- What conflicts were detected
- Which update method will be used (AI agent or smart fallback)
- Risk level (low / medium / high)
"""
from services.update_manager import UpdateManager
app_dir = Path(__file__).parent
mgr = UpdateManager(app_dir)
return jsonify(mgr.get_update_preview())
@app.route("/api/version/update", methods=["POST"])
def trigger_update():
"""Update the app to the latest version.
Tries three strategies in order:
1. Intelligent self-update (analyses diffs, detects agent, backs up,
verifies) — works for native/Pinokio/dev/Docker installs with .git
2. Host update service (JamBot / managed Docker hosting)
3. Returns instructions as last resort
"""
# Check if already current
latest = _get_latest_release_info()
current = _VERSION_INFO.get("commit", "unknown")
if latest and current != "unknown" and latest["sha"].startswith(current):
return jsonify({"status": "current", "message": "Already up to date"})
# Strategy 1: Intelligent self-update if this is a git repo
app_dir = Path(__file__).parent
if (app_dir / ".git").is_dir():
result = _self_update()
status = result.get("status", "error")
if status == "success":
return jsonify({
"status": "updating",
"message": "Update applied — app will restart in a moment",
"method": result.get("method", "unknown"),
"details": {
"files_updated": len(result.get("files_updated", [])),
"customisations_preserved": result.get("customisations_preserved", []),
"warnings": result.get("warnings", []),
},
})
elif status == "current":
return jsonify({"status": "current", "message": "Already up to date"})
elif status == "rolled_back":
return jsonify({
"status": "rolled_back",
"message": "Update was rolled back due to issues",
"reason": result.get("reason", "Verification failed"),
"method": result.get("method", "unknown"),
}), 409
else:
return jsonify({
"status": "error",
"error": result.get("reason", result.get("error", "Update failed")),
"method": result.get("method", "unknown"),
}), 500
# Strategy 2: Host update service (JamBot / managed Docker)
client_name = os.environ.get("CLIENT_NAME", "").strip()
if client_name:
for host in ["host.docker.internal", "172.17.0.1"]:
try:
r = requests.post(f"http://{host}:5199/update/{client_name}", timeout=5)
if r.status_code == 200:
return jsonify({"status": "updating", "message": "Updating — your system will restart in a moment"})
except Exception:
pass
# Strategy 3: Manual — shouldn't happen often
return jsonify({"status": "manual", "message": "Pull the latest version and rebuild to update"})
# ---------------------------------------------------------------------------
# Routes — health probes
# ---------------------------------------------------------------------------
from services.health import health_checker as _health_checker
@app.route("/health/live", methods=["GET"])
def health_live():
"""Liveness probe — always 200 while the process is running."""
result = _health_checker.liveness()
return jsonify({"healthy": result.healthy, "message": result.message, "details": result.details}), 200
@app.route("/health/ready", methods=["GET"])
def health_ready():
"""Readiness probe — 200 only when Gateway and TTS are available."""
result = _health_checker.readiness()
code = 200 if result.healthy else 503
return jsonify({"healthy": result.healthy, "message": result.message, "details": result.details}), code
@app.route("/api/memory-status", methods=["GET"])
def memory_status():
"""Process memory usage — for watchdog monitoring."""
import resource
rusage = resource.getrusage(resource.RUSAGE_SELF)
current_mb = rusage.ru_maxrss / 1024 # ru_maxrss is KB on Linux
return jsonify({"process": {"current_mb": round(current_mb, 1)}})
# ---------------------------------------------------------------------------
# Routes — session
# ---------------------------------------------------------------------------
@app.route("/api/session", methods=["GET"])
def session_info():
    """Return the current voice session key and consecutive-empty-response count."""
    payload = {
        "sessionKey": get_voice_session_key(),
        "consecutiveEmpty": _consecutive_empty_responses,
    }
    return jsonify(payload)
@app.route("/api/session/reset", methods=["POST"])
def session_reset():
"""Reset the voice session context.
Body (JSON, optional):
{ "mode": "soft" } — bump session key only (default)
{ "mode": "hard" } — bump session key and pre-warm the new session
"""
from services.gateway import gateway_connection
data = request.get_json(silent=True) or {}
mode = data.get("mode", "soft")
if mode not in ("soft", "hard"):
return jsonify({"error": f"Invalid mode '{mode}'. Use 'soft' or 'hard'."}), 400
old_key = get_voice_session_key()
new_key = bump_voice_session()
if mode == "hard":
def _prewarm():
try:
gateway_connection.stream_to_queue(
queue.Queue(),
"[SYSTEM: session pre-warm, reply with exactly: ok]",
new_key,
[],
)
logger.info(f"Pre-warm complete for {new_key}")
except Exception as e:
logger.warning(f"Pre-warm failed: {e}")
threading.Thread(target=_prewarm, daemon=True).start()
return jsonify({
"old": old_key,
"new": new_key,
"mode": mode,
"message": f"Session reset ({mode})." + (" Pre-warming new session..." if mode == "hard" else ""),
})
# ---------------------------------------------------------------------------
# Routes — diagnostics
# ---------------------------------------------------------------------------
@app.route("/api/diagnostics", methods=["GET"])
def diagnostics():
"""Diagnostic dashboard — uptime, active config, recent timing metrics."""
import resource
uptime_seconds = int(time.time() - SERVER_START_TIME)
uptime_h = uptime_seconds // 3600
uptime_m = (uptime_seconds % 3600) // 60
rusage = resource.getrusage(resource.RUSAGE_SELF)
memory_mb = round(rusage.ru_maxrss / 1024, 1)
state = {
"server": {
"uptime": f"{uptime_h}h {uptime_m}m",
"uptime_seconds": uptime_seconds,
"memory_mb": memory_mb,
"pid": os.getpid(),
"started_at": datetime.fromtimestamp(SERVER_START_TIME).isoformat(),
},
"config": {
"gateway_url": os.getenv("CLAWDBOT_GATEWAY_URL", "ws://127.0.0.1:18791"),
"session_key": get_voice_session_key(),
"tts_provider": os.getenv("DEFAULT_TTS_PROVIDER", "groq"),
"port": os.getenv("PORT", "5001"),
},
}
try:
conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("""
SELECT profile, model, handshake_ms, llm_inference_ms,
tts_generation_ms, total_ms, user_message_len,
response_len, tts_text_len, tts_provider, tts_success,
tts_error, tool_count, fallback_used, error, created_at
FROM conversation_metrics
ORDER BY id DESC LIMIT 10
""")
state["recent_conversations"] = [dict(r) for r in c.fetchall()]
c.execute("""
SELECT COUNT(*) as total_conversations,
AVG(total_ms) as avg_total_ms,
AVG(llm_inference_ms) as avg_llm_ms,
AVG(tts_generation_ms) as avg_tts_ms,
AVG(handshake_ms) as avg_handshake_ms,
SUM(CASE WHEN tts_success = 0 THEN 1 ELSE 0 END) as tts_failures,
SUM(CASE WHEN error IS NOT NULL THEN 1 ELSE 0 END) as errors,
MAX(total_ms) as max_total_ms,
MIN(total_ms) as min_total_ms
FROM conversation_metrics
WHERE created_at > datetime('now', '-1 hour')
""")
stats = dict(c.fetchone() or {})
for key in ("avg_total_ms", "avg_llm_ms", "avg_tts_ms", "avg_handshake_ms"):
if stats.get(key) is not None:
stats[key] = round(stats[key])
state["last_hour_stats"] = stats
conn.close()
except Exception as e:
state["metrics_error"] = str(e)
return jsonify(state)
# ---------------------------------------------------------------------------
# Routes — Hume EVI token (used by src/adapters/hume-evi.js)
# ---------------------------------------------------------------------------
@app.route("/api/hume/token", methods=["GET"])
def get_hume_token():
"""Return a short-lived Hume access token for EVI WebSocket connections.
Returns 403 when Hume credentials are not configured — the frontend
adapter treats this as 'Hume unavailable' rather than an error.
"""
api_key = os.getenv("HUME_API_KEY")
secret_key = os.getenv("HUME_SECRET_KEY")
if not api_key or not secret_key:
return jsonify({"error": "Hume API credentials not configured", "available": False}), 403
try:
credentials = f"{api_key}:{secret_key}"
encoded = base64.b64encode(credentials.encode()).decode()
response = requests.post(
"https://api.hume.ai/oauth2-cc/token",
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": f"Basic {encoded}",
},
data={"grant_type": "client_credentials"},
timeout=10,
)
if response.status_code != 200:
logger.error(f"Hume token request failed: {response.status_code} — {response.text}")
return jsonify({"error": "Failed to get Hume access token", "available": False}), 500
token_data = response.json()
return jsonify({
"access_token": token_data.get("access_token"),
"expires_in": token_data.get("expires_in", 3600),
"config_id": os.getenv("HUME_CONFIG_ID"),
"available": True,
})
except Exception as e:
logger.error(f"Hume token error: {e}")
return jsonify({"error": "Failed to retrieve token", "available": False}), 500
# ---------------------------------------------------------------------------
# Routes — STT (Speech-to-Text)
# ---------------------------------------------------------------------------
@app.route("/api/stt/groq", methods=["POST"])
def groq_stt():
"""Transcribe audio using Groq Whisper Large v3 Turbo (cloud, fast)."""
from services.tts import get_groq_client as _get_groq_client
if "audio" not in request.files:
return jsonify({"error": "No audio file provided"}), 400
audio_file = request.files["audio"]
groq = _get_groq_client()
if not groq:
return jsonify({"error": "Groq client not available — check GROQ_API_KEY"}), 500
try:
audio_bytes = audio_file.read()
audio_tuple = (
audio_file.filename or "audio.webm",
audio_bytes,
audio_file.content_type or "audio/webm",
)
transcription = groq.audio.transcriptions.create(
file=audio_tuple,
model="whisper-large-v3-turbo",
response_format="verbose_json",
language="en",
temperature=0,
prompt="",
)
# Filter segments with high no_speech_prob or low confidence (Whisper hallucinations)
import re as _re
segments = getattr(transcription, 'segments', None)
if segments:
filtered_texts = []
for seg in segments:
_nsp = seg.get('no_speech_prob', 0) if isinstance(seg, dict) else getattr(seg, 'no_speech_prob', 0)
_alp = seg.get('avg_logprob', 0) if isinstance(seg, dict) else getattr(seg, 'avg_logprob', 0)
_stxt = (seg.get('text', '') if isinstance(seg, dict) else seg.text).strip()
# Reject: high no-speech probability OR very low confidence
if _nsp >= 0.2:
logger.debug(f"Groq STT: dropping segment (no_speech_prob={_nsp:.2f}): {_stxt!r}")
continue
if _alp < -1.0:
logger.debug(f"Groq STT: dropping segment (avg_logprob={_alp:.2f}): {_stxt!r}")
continue
filtered_texts.append(_stxt)
text = ' '.join(filtered_texts).strip()
else:
text = (transcription.text or "").strip()
logger.info(f"Groq STT: {text!r}")
# --- Whisper hallucination filtering ---
_WHISPER_HALLUCINATIONS = {
"thank you", "thanks for watching", "thanks for listening",
"i'm here with closed captioning", "closed captioning",
"subscribe", "please subscribe", "like and subscribe",
"you", "bye", "the end", "subtitles by", "translated by",
"voice command for ai assistant", "voice command for ai",
"alright", "all right", "okay", "ok", "yeah", "yes",
"um", "uh", "hmm", "huh", "oh", "ah",
"so", "well", "right", "sure", "hey",
"thanks", "thank you so much",
"i don't know", "i'm sorry",
}
# Substrings that indicate prompt-echo or known garbage
_HALLUCINATION_SUBSTRINGS = [
"voice command for ai",
"thanks for watching", "thanks for listening",
"like and subscribe", "please subscribe",
"subtitles by", "translated by", "closed captioning",
"coupo foundation", # known recurring hallucination
]
text_lower = text.lower().rstrip('.!?,;:')
_meaningful = _re.sub(r'[^a-zA-Z0-9]', '', text)
def _is_hallucination(t, t_lower):
# Exact match against known phrases
if t_lower in _WHISPER_HALLUCINATIONS:
return True
# Too short to be real speech
if len(_meaningful) < 3:
return True
# Prompt text or known garbage appears anywhere in transcription
for sub in _HALLUCINATION_SUBSTRINGS:
if sub in t_lower:
return True
# Repetitive pattern: same word/phrase repeated many times
words = _re.findall(r'[a-zA-Z]+', t)
if len(words) >= 4:
from collections import Counter
counts = Counter(w.lower() for w in words)
most_common_count = counts.most_common(1)[0][1]
if most_common_count / len(words) >= 0.5:
return True
return False
if _is_hallucination(text, text_lower):
logger.info(f"Groq STT: FILTERED hallucination/garbage: {text!r}")
return jsonify({"transcript": "", "success": True, "filtered": True})
return jsonify({"transcript": text, "success": True})
except Exception as e:
logger.error(f"Groq STT error: {e}")
return jsonify({"error": "Speech-to-text failed"}), 500
@app.route("/api/stt/deepgram/token", methods=["GET"])
def deepgram_stt_token():
"""Return the Deepgram API key for browser-side WebSocket streaming.
The browser needs the key to open a direct WebSocket to Deepgram's
live transcription API. The key is passed via the WebSocket sub-protocol
header so it never appears in URLs or logs.
NOTE: Deepgram supports scoped / short-lived project keys — if you want
tighter security, create a key with only 'usage:write' permission and
rotate it. For now we hand out the configured key since the UI is
already authenticated.
"""
api_key = os.environ.get("DEEPGRAM_API_KEY", "")
if not api_key:
return jsonify({"error": "DEEPGRAM_API_KEY not configured"}), 500
return jsonify({"token": api_key})
@app.route("/api/stt/deepgram", methods=["POST"])
def deepgram_stt():
"""Transcribe audio using Deepgram Nova-2 API (reliable, low-cost)."""
import re as _re
if "audio" not in request.files:
return jsonify({"error": "No audio file provided"}), 400
api_key = os.environ.get("DEEPGRAM_API_KEY", "")
if not api_key:
return jsonify({"error": "DEEPGRAM_API_KEY not configured"}), 500
audio_file = request.files["audio"]
try:
audio_bytes = audio_file.read()
content_type = audio_file.content_type or "audio/webm"
import requests as _requests