Skip to content

Commit a79c3af

Browse files
authored
[single file] Cosmos (#11801)
* update * update * update docs
1 parent 3f3f0c1 commit a79c3af

File tree

5 files changed

+184
-2
lines changed

5 files changed

+184
-2
lines changed

docs/source/en/api/pipelines/cosmos.md

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,31 @@ Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers)
2424

2525
</Tip>
2626

27+
## Loading original format checkpoints

Original format checkpoints that have not been converted to diffusers-expected format can be loaded using the `from_single_file` method.

```python
import torch
from diffusers import Cosmos2TextToImagePipeline, CosmosTransformer3DModel

model_id = "nvidia/Cosmos-Predict2-2B-Text2Image"
transformer = CosmosTransformer3DModel.from_single_file(
    "https://huggingface.co/nvidia/Cosmos-Predict2-2B-Text2Image/blob/main/model.pt",
    torch_dtype=torch.bfloat16,
).to("cuda")
pipe = Cosmos2TextToImagePipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.bfloat16)
pipe.to("cuda")

prompt = "A close-up shot captures a vibrant yellow scrubber vigorously working on a grimy plate, its bristles moving in circular motions to lift stubborn grease and food residue. The dish, once covered in remnants of a hearty meal, gradually reveals its original glossy surface. Suds form and bubble around the scrubber, creating a satisfying visual of cleanliness in progress. The sound of scrubbing fills the air, accompanied by the gentle clinking of the dish against the sink. As the scrubber continues its task, the dish transforms, gleaming under the bright kitchen lights, symbolizing the triumph of cleanliness over mess."
negative_prompt = "The video captures a series of frames showing ugly scenes, static with no motion, motion blur, over-saturation, shaky footage, low resolution, grainy texture, pixelated images, poorly lit areas, underexposed and overexposed scenes, poor color balance, washed out colors, choppy sequences, jerky movements, low frame rate, artifacting, color banding, unnatural transitions, outdated special effects, fake elements, unconvincing visuals, poorly edited content, jump cuts, visual noise, and flickering. Overall, the video is of poor quality."

output = pipe(
    prompt=prompt, negative_prompt=negative_prompt, generator=torch.Generator().manual_seed(1)
).images[0]
output.save("output.png")
```
51+
2752
## CosmosTextToWorldPipeline
2853

2954
[[autodoc]] CosmosTextToWorldPipeline

scripts/convert_cosmos_to_diffusers.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,6 @@ def rename_transformer_blocks_(key: str, state_dict: Dict[str, Any]):
9595
"mlp.layer1": "ff.net.0.proj",
9696
"mlp.layer2": "ff.net.2",
9797
"x_embedder.proj.1": "patch_embed.proj",
98-
# "extra_pos_embedder": "learnable_pos_embed",
9998
"final_layer.adaln_modulation.1": "norm_out.linear_1",
10099
"final_layer.adaln_modulation.2": "norm_out.linear_2",
101100
"final_layer.linear": "proj_out",

src/diffusers/loaders/single_file_model.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
convert_autoencoder_dc_checkpoint_to_diffusers,
3232
convert_chroma_transformer_checkpoint_to_diffusers,
3333
convert_controlnet_checkpoint,
34+
convert_cosmos_transformer_checkpoint_to_diffusers,
3435
convert_flux_transformer_checkpoint_to_diffusers,
3536
convert_hidream_transformer_to_diffusers,
3637
convert_hunyuan_video_transformer_to_diffusers,
@@ -143,6 +144,10 @@
143144
"checkpoint_mapping_fn": convert_hidream_transformer_to_diffusers,
144145
"default_subfolder": "transformer",
145146
},
147+
"CosmosTransformer3DModel": {
148+
"checkpoint_mapping_fn": convert_cosmos_transformer_checkpoint_to_diffusers,
149+
"default_subfolder": "transformer",
150+
},
146151
}
147152

148153

src/diffusers/loaders/single_file_utils.py

Lines changed: 152 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,16 @@
127127
"wan": ["model.diffusion_model.head.modulation", "head.modulation"],
128128
"wan_vae": "decoder.middle.0.residual.0.gamma",
129129
"hidream": "double_stream_blocks.0.block.adaLN_modulation.1.bias",
130+
"cosmos-1.0": [
131+
"net.x_embedder.proj.1.weight",
132+
"net.blocks.block1.blocks.0.block.attn.to_q.0.weight",
133+
"net.extra_pos_embedder.pos_emb_h",
134+
],
135+
"cosmos-2.0": [
136+
"net.x_embedder.proj.1.weight",
137+
"net.blocks.0.self_attn.q_proj.weight",
138+
"net.pos_embedder.dim_spatial_range",
139+
],
130140
}
131141

132142
DIFFUSERS_DEFAULT_PIPELINE_PATHS = {
@@ -193,6 +203,14 @@
193203
"wan-t2v-14B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-T2V-14B-Diffusers"},
194204
"wan-i2v-14B": {"pretrained_model_name_or_path": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"},
195205
"hidream": {"pretrained_model_name_or_path": "HiDream-ai/HiDream-I1-Dev"},
206+
"cosmos-1.0-t2w-7B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World"},
207+
"cosmos-1.0-t2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-14B-Text2World"},
208+
"cosmos-1.0-v2w-7B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World"},
209+
"cosmos-1.0-v2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-1.0-Diffusion-14B-Video2World"},
210+
"cosmos-2.0-t2i-2B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-2B-Text2Image"},
211+
"cosmos-2.0-t2i-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-14B-Text2Image"},
212+
"cosmos-2.0-v2w-2B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-2B-Video2World"},
213+
"cosmos-2.0-v2w-14B": {"pretrained_model_name_or_path": "nvidia/Cosmos-Predict2-14B-Video2World"},
196214
}
197215

198216
# Use to configure model sample size when original config is provided
@@ -704,11 +722,32 @@ def infer_diffusers_model_type(checkpoint):
704722
model_type = "wan-t2v-14B"
705723
else:
706724
model_type = "wan-i2v-14B"
725+
707726
elif CHECKPOINT_KEY_NAMES["wan_vae"] in checkpoint:
708727
# All Wan models use the same VAE so we can use the same default model repo to fetch the config
709728
model_type = "wan-t2v-14B"
729+
710730
elif CHECKPOINT_KEY_NAMES["hidream"] in checkpoint:
711731
model_type = "hidream"
732+
733+
elif all(key in checkpoint for key in CHECKPOINT_KEY_NAMES["cosmos-1.0"]):
734+
x_embedder_shape = checkpoint[CHECKPOINT_KEY_NAMES["cosmos-1.0"][0]].shape
735+
if x_embedder_shape[1] == 68:
736+
model_type = "cosmos-1.0-t2w-7B" if x_embedder_shape[0] == 4096 else "cosmos-1.0-t2w-14B"
737+
elif x_embedder_shape[1] == 72:
738+
model_type = "cosmos-1.0-v2w-7B" if x_embedder_shape[0] == 4096 else "cosmos-1.0-v2w-14B"
739+
else:
740+
raise ValueError(f"Unexpected x_embedder shape: {x_embedder_shape} when loading Cosmos 1.0 model.")
741+
742+
elif all(key in checkpoint for key in CHECKPOINT_KEY_NAMES["cosmos-2.0"]):
743+
x_embedder_shape = checkpoint[CHECKPOINT_KEY_NAMES["cosmos-2.0"][0]].shape
744+
if x_embedder_shape[1] == 68:
745+
model_type = "cosmos-2.0-t2i-2B" if x_embedder_shape[0] == 2048 else "cosmos-2.0-t2i-14B"
746+
elif x_embedder_shape[1] == 72:
747+
model_type = "cosmos-2.0-v2w-2B" if x_embedder_shape[0] == 2048 else "cosmos-2.0-v2w-14B"
748+
else:
749+
raise ValueError(f"Unexpected x_embedder shape: {x_embedder_shape} when loading Cosmos 2.0 model.")
750+
712751
else:
713752
model_type = "v1"
714753

@@ -3479,3 +3518,116 @@ def swap_scale_shift(weight):
34793518
converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias")
34803519

34813520
return converted_state_dict
3521+
3522+
3523+
def convert_cosmos_transformer_checkpoint_to_diffusers(checkpoint, **kwargs):
    """Convert an original NVIDIA Cosmos transformer checkpoint to diffusers naming.

    Supports both Cosmos 1.0 and Cosmos 2.0 (Predict2) transformer state dicts;
    the variant is detected from a key layout that only exists in Cosmos 1.0
    checkpoints. The input ``checkpoint`` is drained (every key is popped) and a
    new dict with diffusers-style parameter names is returned. Extra keyword
    arguments are accepted for signature compatibility with the other
    ``convert_*_to_diffusers`` mapping functions and are ignored.
    """
    # Take ownership of every entry; this empties `checkpoint`.
    converted_state_dict = {key: checkpoint.pop(key) for key in list(checkpoint.keys())}

    def remove_keys_(key: str, state_dict):
        # Handler for keys with no diffusers counterpart (training-only state).
        state_dict.pop(key)

    def rename_transformer_blocks_(key: str, state_dict):
        # Cosmos 1.0 names blocks "blocks.block<N>.*"; diffusers uses
        # "transformer_blocks.<N>.*".
        block_index = int(key.split(".")[1].removeprefix("block"))
        new_key = key
        old_prefix = f"blocks.block{block_index}"
        new_prefix = f"transformer_blocks.{block_index}"
        new_key = new_prefix + new_key.removeprefix(old_prefix)
        state_dict[new_key] = state_dict.pop(key)

    # Ordered substring replacements — insertion order matters (e.g. "to_q.0"
    # must be rewritten before the bare "proj.1" rule could touch the same key).
    TRANSFORMER_KEYS_RENAME_DICT_COSMOS_1_0 = {
        "t_embedder.1": "time_embed.t_embedder",
        "affline_norm": "time_embed.norm",
        ".blocks.0.block.attn": ".attn1",
        ".blocks.1.block.attn": ".attn2",
        ".blocks.2.block": ".ff",
        ".blocks.0.adaLN_modulation.1": ".norm1.linear_1",
        ".blocks.0.adaLN_modulation.2": ".norm1.linear_2",
        ".blocks.1.adaLN_modulation.1": ".norm2.linear_1",
        ".blocks.1.adaLN_modulation.2": ".norm2.linear_2",
        ".blocks.2.adaLN_modulation.1": ".norm3.linear_1",
        ".blocks.2.adaLN_modulation.2": ".norm3.linear_2",
        "to_q.0": "to_q",
        "to_q.1": "norm_q",
        "to_k.0": "to_k",
        "to_k.1": "norm_k",
        "to_v.0": "to_v",
        "layer1": "net.0.proj",
        "layer2": "net.2",
        "proj.1": "proj",
        "x_embedder": "patch_embed",
        "extra_pos_embedder": "learnable_pos_embed",
        "final_layer.adaLN_modulation.1": "norm_out.linear_1",
        "final_layer.adaLN_modulation.2": "norm_out.linear_2",
        "final_layer.linear": "proj_out",
    }

    TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_1_0 = {
        "blocks.block": rename_transformer_blocks_,
        "logvar.0.freqs": remove_keys_,
        "logvar.0.phases": remove_keys_,
        "logvar.1.weight": remove_keys_,
        "pos_embedder.seq": remove_keys_,
    }

    TRANSFORMER_KEYS_RENAME_DICT_COSMOS_2_0 = {
        "t_embedder.1": "time_embed.t_embedder",
        "t_embedding_norm": "time_embed.norm",
        "blocks": "transformer_blocks",
        "adaln_modulation_self_attn.1": "norm1.linear_1",
        "adaln_modulation_self_attn.2": "norm1.linear_2",
        "adaln_modulation_cross_attn.1": "norm2.linear_1",
        "adaln_modulation_cross_attn.2": "norm2.linear_2",
        "adaln_modulation_mlp.1": "norm3.linear_1",
        "adaln_modulation_mlp.2": "norm3.linear_2",
        "self_attn": "attn1",
        "cross_attn": "attn2",
        "q_proj": "to_q",
        "k_proj": "to_k",
        "v_proj": "to_v",
        "output_proj": "to_out.0",
        "q_norm": "norm_q",
        "k_norm": "norm_k",
        "mlp.layer1": "ff.net.0.proj",
        "mlp.layer2": "ff.net.2",
        "x_embedder.proj.1": "patch_embed.proj",
        "final_layer.adaln_modulation.1": "norm_out.linear_1",
        "final_layer.adaln_modulation.2": "norm_out.linear_2",
        "final_layer.linear": "proj_out",
    }

    TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_2_0 = {
        "accum_video_sample_counter": remove_keys_,
        "accum_image_sample_counter": remove_keys_,
        "accum_iteration": remove_keys_,
        "accum_train_in_hours": remove_keys_,
        "pos_embedder.seq": remove_keys_,
        "pos_embedder.dim_spatial_range": remove_keys_,
        "pos_embedder.dim_temporal_range": remove_keys_,
        "_extra_state": remove_keys_,
    }

    PREFIX_KEY = "net."
    # NOTE: `checkpoint` was drained above, so the variant probe must look at
    # `converted_state_dict`. Probing `checkpoint` here would always be False
    # and silently convert Cosmos 1.0 checkpoints with the 2.0 mapping.
    if "net.blocks.block1.blocks.0.block.attn.to_q.0.weight" in converted_state_dict:
        TRANSFORMER_KEYS_RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT_COSMOS_1_0
        TRANSFORMER_SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_1_0
    else:
        TRANSFORMER_KEYS_RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT_COSMOS_2_0
        TRANSFORMER_SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_2_0

    # Pass 1: strip the "net." prefix and apply the ordered substring renames.
    state_dict_keys = list(converted_state_dict.keys())
    for key in state_dict_keys:
        new_key = key[:]
        if new_key.startswith(PREFIX_KEY):
            new_key = new_key.removeprefix(PREFIX_KEY)
        for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items():
            new_key = new_key.replace(replace_key, rename_key)
        converted_state_dict[new_key] = converted_state_dict.pop(key)

    # Pass 2: per-key handlers (block re-indexing, dropping training-only keys).
    state_dict_keys = list(converted_state_dict.keys())
    for key in state_dict_keys:
        for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items():
            if special_key not in key:
                continue
            handler_fn_inplace(key, converted_state_dict)

    return converted_state_dict

src/diffusers/models/transformers/transformer_cosmos.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
import torch.nn.functional as F
2121

2222
from ...configuration_utils import ConfigMixin, register_to_config
23+
from ...loaders import FromOriginalModelMixin
2324
from ...utils import is_torchvision_available
2425
from ..attention import FeedForward
2526
from ..attention_processor import Attention
@@ -377,7 +378,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
377378
return (emb / norm).type_as(hidden_states)
378379

379380

380-
class CosmosTransformer3DModel(ModelMixin, ConfigMixin):
381+
class CosmosTransformer3DModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
381382
r"""
382383
A Transformer model for video-like data used in [Cosmos](https://github.com/NVIDIA/Cosmos).
383384

0 commit comments

Comments
 (0)