Commit bcf9310

Add CI for Autoparallel experiment llama3 on 4 GPUs
stack-info: PR: #2105, branch: xmfan/stack/5
1 parent 571ce7c commit bcf9310

4 files changed: +147 -1 lines changed

.github/workflows/integration_test_8gpu_auto_parallel.yaml

Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
name: Auto Parallel 8 GPU Integration Tests

on:
  push:
    branches: [ main ]
    paths:
      - 'torchtitan/experiments/auto_parallel/**'
      - '.github/workflows/integration_test_8gpu_auto_parallel.yaml'
  pull_request:
    paths:
      - 'torchtitan/experiments/auto_parallel/**'
      - '.github/workflows/integration_test_8gpu_auto_parallel.yaml'
  schedule:
    # Runs every 12 hours
    - cron: '0 */12 * * *'

concurrency:
  group: unit-test${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_number || github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash -l -eo pipefail {0}

jobs:
  build-test:
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    with:
      runner: linux.g5.48xlarge.nvidia.gpu
      gpu-arch-type: cuda
      gpu-arch-version: "12.6"
      # This image is faster to clone than the default, but it lacks CC needed by triton
      # (1m25s vs 2m37s).
      docker-image: torchtitan-ubuntu-20.04-clang12
      repository: pytorch/torchtitan
      upload-artifact: outputs
      script: |
        set -eux

        # The generic Linux job chooses to use base env, not the one setup by the image
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"

        # Log CUDA driver version for debugging.
        DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader | head -n 1 || true)
        echo "CUDA driver version: ${DRIVER_VERSION}"

        pip config --user set global.progress_bar off

        python -m pip install --force-reinstall --pre torch --index-url https://download.pytorch.org/whl/nightly/cu126

        # Install autoparallel - required dependency for auto_parallel experiment
        python -m pip install git+https://github.com/meta-pytorch/autoparallel.git

        mkdir artifacts-to-be-uploaded
        python -m torchtitan.experiments.auto_parallel.tests.integration_tests artifacts-to-be-uploaded --ngpu 4
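
To reproduce the test portion of this job locally, the following is a minimal sketch (not the job itself): it assumes a machine with at least 4 CUDA GPUs and an already-activated Python environment, and skips the CI-runner-specific conda discovery and artifact upload.

# Install the same dependencies the workflow installs: a nightly cu126 torch build
# plus the autoparallel package required by the auto_parallel experiment.
python -m pip install --force-reinstall --pre torch --index-url https://download.pytorch.org/whl/nightly/cu126
python -m pip install git+https://github.com/meta-pytorch/autoparallel.git

# From the torchtitan repo root, run the experiment's integration tests on 4 GPUs,
# writing outputs into an empty directory, mirroring the CI invocation above.
mkdir artifacts-to-be-uploaded
python -m torchtitan.experiments.auto_parallel.tests.integration_tests artifacts-to-be-uploaded --ngpu 4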

torchtitan/experiments/README.md

Lines changed: 1 addition & 1 deletion
@@ -32,4 +32,4 @@ We provide this `experiments/` folder to host experiments that add significant v
 | [gpt_oss](./gpt_oss/) | TBA | [@jianiw](https://github.com/jianiw) |
 | [compiler_toolkit](./compiler_toolkit/) | [![Compiler Toolkit 8 GPU Integration Tests](https://github.com/pytorch/torchtitan/actions/workflows/integration_test_8gpu_compiler_toolkit.yaml/badge.svg?branch=main)](https://github.com/pytorch/torchtitan/actions/workflows/integration_test_8gpu_compiler_toolkit.yaml?query=branch%3Amain) | [@SherlockNoMad](https://github.com/SherlockNoMad) [@yiming0416](https://github.com/yiming0416) |
 | [transformers_modeling_backend](./transformers_modeling_backend/) | [![Transformers modeling backend 8 GPU Integration Tests](https://github.com/pytorch/torchtitan/actions/workflows/integration_test_8gpu_transformers_modeling_backend.yaml/badge.svg?branch=main)](https://github.com/pytorch/torchtitan/actions/workflows/integration_test_8gpu_transformers_modeling_backend.yaml?query=branch%3Amain) | [@3outeille](https://github.com/3outeille) |
-| [auto_parallel](./auto_parallel/) | TBA | [@wconstab](https://github.com/wconstab) | [@xmfan](https://github.com/xmfan) |
+| [auto_parallel](./auto_parallel/) | [![Auto Parallel 8 GPU Integration Tests](https://github.com/pytorch/torchtitan/actions/workflows/integration_test_8gpu_auto_parallel.yaml/badge.svg?branch=main)](https://github.com/pytorch/torchtitan/actions/workflows/integration_test_8gpu_auto_parallel.yaml?query=branch%3Amain) | [@wconstab](https://github.com/wconstab) [@xmfan](https://github.com/xmfan) |

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

torchtitan/experiments/auto_parallel/tests/integration_tests.py

Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os

from tests.integration_tests import OverrideDefinitions
from tests.integration_tests.run_tests import run_tests


def build_auto_parallel_test_list() -> list[OverrideDefinitions]:
    """
    returns a list of OverrideDefinitions that is used to generate
    variations of integration tests based on the same root config file.
    """
    integration_tests_flavors = [
        # llama3 tests
        OverrideDefinitions(
            [
                [
                    "--model.name auto_parallel.llama3",
                    "--parallelism.data_parallel_shard_degree 2",
                    "--parallelism.tensor_parallel_degree 2",
                    "--job.custom_config_module=torchtitan.experiments.auto_parallel.job_config",
                ],
            ],
            "llama3 AutoParallel FSDP+TP",
            "llama3_autoparallel_fsdp_tp",
            ngpu=4,
        ),
        # TODO: Re-enable this once we fix the test
        # deepseek_v3 tests
        # OverrideDefinitions(
        #     [
        #         [
        #             "--model.name auto_parallel.deepseek_v3",
        #             "--parallelism.data_parallel_shard_degree 2",
        #             "--parallelism.expert_parallel_degree 2",
        #             "--job.custom_config_module=torchtitan.experiments.auto_parallel.job_config",
        #             "--activation_checkpoint.mode none",
        #         ],
        #     ],
        #     "deepseek_v3 AutoParallel FSDP+TP+EP",
        #     "deepseekv3_autoparallel_fsdp_tp_ep",
        #     ngpu=4,
        # ),
    ]
    return integration_tests_flavors


_TEST_SUITES_FUNCTION = {
    "auto_parallel": build_auto_parallel_test_list,
}


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("output_dir")
    parser.add_argument(
        "--config_path",
        default="./tests/integration_tests/base_config.toml",
        help="Base config path for integration tests. This is the config that will be used as a base for all tests.",
    )
    parser.add_argument(
        "--test_name",
        default="all",
        help="test to run, acceptable values: `test_name` in `build_test_list` (default: all)",
    )
    parser.add_argument("--ngpu", default=8, type=int)
    args = parser.parse_args()

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if os.listdir(args.output_dir):
        raise RuntimeError("Please provide an empty output directory.")

    test_list = _TEST_SUITES_FUNCTION["auto_parallel"]()
    run_tests(args, test_list)


if __name__ == "__main__":
    main()
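
Because the runner also accepts --test_name, a single flavor can be selected by its test_name while iterating locally. A hypothetical invocation (assuming it is run from the torchtitan repository root so the default --config_path of ./tests/integration_tests/base_config.toml resolves; /tmp/auto_parallel_out stands in for any empty output directory):

# Run only the llama3 FSDP+TP flavor on 4 GPUs.
python -m torchtitan.experiments.auto_parallel.tests.integration_tests /tmp/auto_parallel_out \
    --test_name llama3_autoparallel_fsdp_tp \
    --ngpu 4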
