20 changes: 20 additions & 0 deletions torchtitan/experiments/transformers_modeling_backend/__init__.py
@@ -23,6 +23,26 @@


flavors = {
"debugperf": HFTransformerModelArgs(
Review comment (Contributor): Should we remove these 2 test models?

Review comment (Contributor): What's the difference between debugperf / debugperf_large and debugmodel? Can we just keep one of them?

titan_dense_args=TitanDenseModelArgs(
dim=256,
n_layers=6,
n_heads=16,
n_kv_heads=16,
vocab_size=2048,
rope_theta=500000,
),
),
"debugperf_large": HFTransformerModelArgs(
titan_dense_args=TitanDenseModelArgs(
dim=1024,
n_layers=12,
n_heads=16,
n_kv_heads=16,
vocab_size=32000,
rope_theta=500000,
),
),
"debugmodel": HFTransformerModelArgs(
titan_dense_args=TitanDenseModelArgs(
dim=256,
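For context, each flavor is just a named preset of model args, and a training run selects one by name. A minimal sketch of how such a lookup might work (the job-config field name and the error handling are assumptions, not the torchtitan implementation):

def get_model_args(job_config, flavors):
    # Resolve a flavor name such as "debugmodel" or "debugperf" to its preset.
    flavor = job_config.model.flavor  # assumed field name
    if flavor not in flavors:
        raise ValueError(f"Unknown flavor {flavor!r}; available: {sorted(flavors)}")
    return flavors[flavor]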
@@ -190,8 +190,8 @@ def apply_non_moe_tp(
layer_plan = {
"input_layernorm": SequenceParallel(),
"self_attn": prepare_module_input(
-            input_kwarg_layouts={"hidden_states": Shard(1)},
-            desired_input_kwarg_layouts={"hidden_states": Replicate()},
+            input_layouts=(Shard(1),),
+            desired_input_layouts=(Replicate(),),
),
"post_attention_layernorm": SequenceParallel(),
}
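The change above switches prepare_module_input from keyword-argument layouts to positional input layouts, so the first positional argument (hidden_states) is what gets redistributed. A minimal sketch of how such a plan is typically applied with PyTorch's tensor-parallel API, assuming a recent PyTorch and that prepare_module_input corresponds to torch.distributed.tensor.parallel.PrepareModuleInput (the function and argument names below are illustrative, not the torchtitan code):

from torch.distributed.tensor import Replicate, Shard
from torch.distributed.tensor.parallel import (
    PrepareModuleInput,
    SequenceParallel,
    parallelize_module,
)

def apply_attention_tp(transformer_blocks, tp_mesh):
    """Apply sequence parallelism plus input redistribution to each decoder block."""
    layer_plan = {
        "input_layernorm": SequenceParallel(),
        # The first positional input (hidden_states) arrives sharded on the
        # sequence dim and is redistributed to Replicate before self-attention.
        "self_attn": PrepareModuleInput(
            input_layouts=(Shard(1),),
            desired_input_layouts=(Replicate(),),
        ),
        "post_attention_layernorm": SequenceParallel(),
    }
    for block in transformer_blocks:
        parallelize_module(block, tp_mesh, layer_plan)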
@@ -11,6 +11,8 @@
class HFTransformers:
model: str = ""
"""HuggingFace model ID (e.g., 'Qwen/Qwen3-4B-Instruct-2507')"""
tie_word_embeddings: bool = False
Review comment (Contributor): Putting tie_word_embeddings into the job config is a little confusing, and it seems not related to this error. IIUC this field is decided by the model architecture, not by each training run. So previously we put Qwen3's weight-tying config into model_args:

enable_weight_tying: bool = False

"""Whether to tie input embeddings and output projection weights (default: True for HF models)"""


@dataclass
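For reference, weight tying means the output projection shares its weight matrix with the input embedding, which is why the reviewer treats it as a model-architecture property rather than a per-run job-config option. A minimal illustrative sketch (the class and attribute names are assumptions, not the torchtitan or HF implementation):

import torch.nn as nn

class TinyLM(nn.Module):
    def __init__(self, vocab_size: int, dim: int, tie_word_embeddings: bool = False):
        super().__init__()
        self.embed_tokens = nn.Embedding(vocab_size, dim)
        self.lm_head = nn.Linear(dim, vocab_size, bias=False)
        if tie_word_embeddings:
            # The output projection reuses the embedding Parameter, so the two
            # stay identical through training and checkpointing.
            self.lm_head.weight = self.embed_tokens.weight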
@@ -179,6 +179,7 @@ def update_from_config(self, job_config: JobConfig):
self.mlp_bias = False
self.use_cache = False
self.initializer_range = 1.0 # use as std for normal init in embedding
self.tie_word_embeddings = job_config.hf_transformers.tie_word_embeddings

if not hasattr(self, "inter_dim"): # Only for llama model
ffn_hidden_size = 4 * self.dim
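The truncated branch above falls back to computing the FFN hidden size for Llama-style configs that don't define inter_dim. For reference, the conventional Llama sizing recipe looks roughly like the sketch below (a well-known formula, not necessarily the exact code that follows in this file; parameter defaults are assumptions):

def llama_ffn_hidden_size(dim, ffn_dim_multiplier=None, multiple_of=256):
    # Llama convention: start from 4 * dim, take 2/3 of it (SwiGLU uses three
    # projections instead of two), optionally scale, then round up to a multiple.
    hidden = 4 * dim
    hidden = int(2 * hidden / 3)
    if ffn_dim_multiplier is not None:
        hidden = int(ffn_dim_multiplier * hidden)
    return multiple_of * ((hidden + multiple_of - 1) // multiple_of)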