Commit 6e6c41a

add script to train with ft

Summary: adds a wrapper script and a debug model config to run training locally with ft (fault tolerance) enabled.
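For example (a minimal sketch, assuming torchft is installed, a lighthouse is reachable, and run_train.sh honors the NGPU variable as usual), a single local replica group can be started with:

NGPU=2 ./run_train_ft.sh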
1 parent ab1ed89

File tree

run_train_ft.sh
torchtitan/models/llama3/train_configs/debug_model_ft.toml

2 files changed: 110 additions & 0 deletions
run_train_ft.sh

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
#!/bin/bash

set -ex

# Debug model config with the [fault_tolerance] section enabled.
export CONFIG_FILE=${CONFIG_FILE:-"./torchtitan/models/llama3/train_configs/debug_model_ft.toml"}

# Which replica group this launch belongs to, and how many replica groups there are in total.
export FT_REPLICA_ID="${FT_REPLICA_ID:-0}"
export FT_GROUP_SIZE="${FT_GROUP_SIZE:-1}"

# Forward the fault tolerance settings to the regular training entry point.
./run_train.sh \
--fault_tolerance.group_size="${FT_GROUP_SIZE}" \
--fault_tolerance.replica_id="${FT_REPLICA_ID}"
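Usage sketch (not part of the commit; it assumes torchft is installed, that its lighthouse is started with the torchft_lighthouse CLI on its default port, and that the manager reads the TORCHFT_LIGHTHOUSE address, as described in the torchft and torchtitan docs; adjust the port and NGPU to your machine):

# terminal 1: coordination server for the replica groups
RUST_BACKTRACE=1 torchft_lighthouse --min_replicas 1 --join_timeout_ms 10000

# terminal 2: replica group 0 of 2
TORCHFT_LIGHTHOUSE=http://localhost:29510 NGPU=2 FT_GROUP_SIZE=2 FT_REPLICA_ID=0 ./run_train_ft.sh

# terminal 3: replica group 1 of 2
TORCHFT_LIGHTHOUSE=http://localhost:29510 NGPU=2 FT_GROUP_SIZE=2 FT_REPLICA_ID=1 ./run_train_ft.sh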
torchtitan/models/llama3/train_configs/debug_model_ft.toml

Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
[job]
dump_folder = "./outputs"
description = "Llama 3 debug training"
print_args = false

[profiling]
enable_profiling = true
save_traces_folder = "profile_trace"
profile_freq = 10
profiler_active = 10
profiler_warmup = 0
enable_memory_snapshot = false
save_memory_snapshot_folder = "memory_snapshot"

[metrics]
log_freq = 1
disable_color_printing = false
enable_tensorboard = false
save_tb_folder = "tb"
enable_wandb = false

[model]
name = "llama3"
flavor = "debugmodel"
# test folder with tokenizer.json, for debug purpose only
hf_assets_path = "./tests/assets/tokenizer"
# converters = ["float8"]

[optimizer]
name = "AdamW"
lr = 8e-4
eps = 1e-8

[lr_scheduler]
warmup_steps = 2  # lr scheduler warm up, normally 20% of the train steps
decay_ratio = 0.8  # lr scheduler decay ratio, 80% of the train steps
decay_type = "linear"
min_lr_factor = 0.0

[training]
local_batch_size = 8
seq_len = 2048
max_norm = 1.0  # grad norm clipping
steps = 100
dataset = "c4_test"  # supported datasets: c4_test (2K), c4 (177M)

[parallelism]
data_parallel_replicate_degree = 1
data_parallel_shard_degree = -1
fsdp_reshard_after_forward = "default"  # default / never / always
tensor_parallel_degree = 1
enable_async_tensor_parallel = false
pipeline_parallel_degree = 1
context_parallel_degree = 1

[checkpoint]
enable = false
folder = "checkpoint"
interval = 10
last_save_model_only = false
export_dtype = "float32"
async_mode = "disabled"  # ["disabled", "async", "async_with_pinned_mem"]

[activation_checkpoint]
mode = "selective"  # ["none", "selective", "full"]
selective_ac_option = "2"  # 'int' = ac every positive int layer or 'op', ac based on ops policy

[compile]
enable = false
components = ["model", "loss"]

[quantize.linear.float8]
enable_fsdp_float8_all_gather = false
precompute_float8_dynamic_scale_for_fsdp = false
filter_fqns = ["output"]

[validation]
enable = false
dataset = "c4_validation"
freq = 5
steps = 10

[comm]
train_timeout_seconds = 15

[fault_tolerance]
enable = true
sync_steps = 10
num_fragments = 2
semi_sync_method = "diloco"
data_parallel_shard_degree = 1
process_group = "nccl"
process_group_timeout_ms = 10000

[experimental]
custom_args_module = "torchtitan.components.ft.config"
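The flags passed by run_train_ft.sh take precedence over the matching [fault_tolerance] keys in this file (a sketch, assuming torchtitan's usual behavior where command-line section.option flags override TOML values): settings baked into the config such as sync_steps, num_fragments, and semi_sync_method stay fixed, while replica_id and group_size are supplied per launch, e.g.

# replica_id/group_size from the command line override the TOML defaults above
CONFIG_FILE=./torchtitan/models/llama3/train_configs/debug_model_ft.toml ./run_train.sh \
--fault_tolerance.group_size=2 \
--fault_tolerance.replica_id=1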
