# ------------------------------------------------------------------------------
# Copyright 2025 2toINF (https://github.com/2toINF)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
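"""Deploy an XVLA model behind its built-in FastAPI inference server.

Loads a pretrained XVLA checkpoint (optionally with LoRA adapter weights),
records the serving host/port in <output_dir>/info.json, and starts the
server via ``model.run()``.

Example invocation (paths are illustrative):
    python deploy.py --model_path /path/to/xvla-checkpoint --port 8010
"""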
import argparse
import json
import os
import os.path as osp
import sys

import torch

from models.modeling_xvla import XVLA
from models.processing_xvla import XVLAProcessor


def main():
    parser = argparse.ArgumentParser(description="Launch XVLA inference FastAPI server")
    parser.add_argument("--model_path", type=str, required=True,
                        help="Path to the pretrained XVLA model directory")
    parser.add_argument("--processor_path", type=str, default=None,
                        help="Path to the XVLAProcessor directory (defaults to --model_path)")
    parser.add_argument("--LoRA_path", type=str, default=None,
                        help="Optional path to LoRA adapter weights applied on top of the base model")
    parser.add_argument("--output_dir", type=str, default="./logs",
                        help="Directory to save runtime info (info.json)")
    parser.add_argument("--device", type=str, default="cuda",
                        help="Device to load model on (cuda / cpu / auto)")
    parser.add_argument("--port", default=8010, type=int,
                        help="Port number for FastAPI server")
    parser.add_argument("--host", default="0.0.0.0", type=str,
                        help="Host address for FastAPI server")
    parser.add_argument("--disable_slurm", action="store_true", default=False,
                        help="Ignore SLURM environment variables when choosing the serving host")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)

    print("🚀 Starting XVLA Inference Server...")
    print(f"🔹 Model Path : {args.model_path}")
    print(f"🔹 Output Dir : {args.output_dir}")
    print(f"🔹 Device Arg : {args.device}")
    print(f"🔹 Port       : {args.port}")

    # --------------------------------------------------------------------------
    # Select device automatically
    # --------------------------------------------------------------------------
    if args.device == "auto":
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device(args.device)
    print(f"🧠 Using device: {device}")
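    # NOTE: any explicit device string accepted by torch.device (e.g. "cuda:1")
    # can be passed via --device; "auto" simply prefers CUDA when available.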

    # --------------------------------------------------------------------------
    # Load processor (if available)
    # --------------------------------------------------------------------------
    processor = None
    try:
        print("\n🧩 Loading XVLAProcessor...")
        processor_path = args.processor_path if args.processor_path else args.model_path
        processor = XVLAProcessor.from_pretrained(processor_path)
        print("✅ XVLAProcessor loaded successfully.")
    except Exception as e:
        print(f"⚠️ No processor found or failed to load: {e}")

    # --------------------------------------------------------------------------
    # Load model
    # --------------------------------------------------------------------------
    print("\n📦 Loading XVLA model from pretrained checkpoint...")
    try:
        model = XVLA.from_pretrained(
            args.model_path,
            trust_remote_code=True,
            torch_dtype=torch.float32,
        ).to(device)
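        # peft is imported lazily so the dependency is only required
        # when --LoRA_path is actually given.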
        if args.LoRA_path is not None:
            print(f"🔸 Applying LoRA weights from {args.LoRA_path} ...")
            from peft import PeftModel
            model = PeftModel.from_pretrained(
                model,
                args.LoRA_path,
                torch_dtype=torch.float32,
            ).to(device)
            print("✅ LoRA weights applied successfully.")
        print("✅ Model successfully loaded and moved to device.")
    except Exception as e:
        print(f"❌ Failed to load model: {e}")
        sys.exit(1)

    # --------------------------------------------------------------------------
    # SLURM environment detection
    # --------------------------------------------------------------------------
    node_list = os.environ.get("SLURM_NODELIST")
    job_id = os.environ.get("SLURM_JOB_ID", "none")
    if node_list and not args.disable_slurm:
        print("\n🖥️ SLURM Environment Detected:")
        print(f"   Node list : {node_list}")
        print(f"   Job ID    : {job_id}")
        # Extract the serving host from the node name; this assumes names of
        # the form "<prefix>-a-b-c-d" that encode the node's IP address.
        try:
            host = ".".join(node_list.split("-")[1:]) if "-" in node_list else node_list
        except Exception:
            host = args.host
    else:
        print(f"\n⚠️ No SLURM environment detected (or --disable_slurm set), defaulting to {args.host}")
        host = args.host

    # --------------------------------------------------------------------------
    # Write info.json for bookkeeping (safe version)
    # --------------------------------------------------------------------------
    info_path = osp.join(args.output_dir, "info.json")
    infos = {
        "host": host,
        "port": args.port,
        "job_id": job_id,
        "node_list": node_list or "none",
    }
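    # The resulting info.json looks like this (values are illustrative):
    # {
    #     "host": "0.0.0.0",
    #     "port": 8010,
    #     "job_id": "none",
    #     "node_list": "none"
    # }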
    # --- Check existence before writing ---
    if osp.exists(info_path):
        print(f"❌ Error: {info_path} already exists. "
              f"This usually means another server is still running or the previous job did not clean up properly.")
        print("👉 Please remove it manually or use a different --output_dir.")
        sys.exit(1)

    # --- Write safely ---
    try:
        with open(info_path, "w") as f:
            json.dump(infos, f, indent=4)
        print(f"📝 Server info written to {info_path}")
    except Exception as e:
        print(f"⚠️ Failed to write {info_path}: {e}")
        sys.exit(1)

    # --------------------------------------------------------------------------
    # Launch FastAPI server
    # --------------------------------------------------------------------------
    print(f"\n🌐 Launching FastAPI service at http://{host}:{args.port} ...")
    try:
        if hasattr(model, "run"):
            model.run(processor=processor, host=host, port=args.port)
        else:
            print("❌ The loaded model does not implement `.run()` (FastAPI entrypoint).")
            sys.exit(1)
    except KeyboardInterrupt:
        print("\n🛑 Server stopped manually.")
    except Exception as e:
        print(f"❌ Server failed to start: {e}")


if __name__ == "__main__":
    main()