-
Notifications
You must be signed in to change notification settings - Fork 867
Open
Description
This is how I load. By the way, text-to-video doesn't have this issue. E.g. tested on WAN 2.1 14B Text-to-Video
I hope you can help me @Artiprocher
elif model_choice == "14B_image_480p":
clip_path = get_common_file(os.path.join("models", "models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"))
t5_path = get_common_file(os.path.join("models", "models_t5_umt5-xxl-enc-bf16.pth"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "models_t5_umt5-xxl-enc-bf16.pth"))
vae_path = get_common_file(os.path.join("models", "Wan2.1_VAE.pth"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "Wan2.1_VAE.pth"))
model_manager.load_models([clip_path], torch_dtype=torch.float32)
model_manager.load_models(
[
[
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00001-of-00007.safetensors"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00002-of-00007.safetensors"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00003-of-00007.safetensors"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00004-of-00007.safetensors"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00005-of-00007.safetensors"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00006-of-00007.safetensors"),
os.path.join("models", "Wan-AI", "Wan2.1-I2V-14B-480P", "diffusion_pytorch_model-00007-of-00007.safetensors"),
],
t5_path,
vae_path,
],
torch_dtype=torch.float8_e4m3fn,
)
pipe = WanVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device=device)
try:
num_persistent_val = int(num_persistent)
except:
print("[CMD] Warning: could not parse num_persistent value, defaulting to 6000000000")
num_persistent_val = 6000000000
print(f"num_persistent_val {num_persistent_val}")
pipe.enable_vram_management(num_persistent_param_in_dit=num_persistent_val)
print("[CMD] Model loaded successfully.")
return pipe
Now I will show BF16 vs FP8
improved_00213.mp4
improved_00215.mp4
anr2me
Metadata
Metadata
Assignees
Labels
No labels