Skip to content

Commit

Permalink
add batch_fusion option (`--torchinductor_enable_batch_fusion`)
Browse files Browse the repository at this point in the history
Summary: introduce `--torchinductor_enable_batch_fusion`

Reviewed By: xuzhao9

Differential Revision: D48382188

fbshipit-source-id: 40e4e82b75b53b6d2f172689ea0c2f2ef8ced206
  • Loading branch information
chaekit authored and facebook-github-bot committed Aug 17, 2023
1 parent ee5266b commit 208068d
Showing 1 changed file with 7 additions and 0 deletions.
7 changes: 7 additions & 0 deletions torchbenchmark/util/backends/torchdynamo.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,11 @@ def parse_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', dy
action='store_true',
help="enable group fusion in Inductor"
)
parser.add_argument(
"--torchinductor_enable_batch_fusion",
action='store_true',
help="enable batch fusion in Inductor"
)
parser.add_argument(
"--dynamo_disable_optimizer_step",
type=distutils.util.strtobool,
Expand Down Expand Up @@ -86,6 +91,8 @@ def apply_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', ar
# torchinductor.config.triton.use_bmm = True
if args.torchinductor_enable_group_fusion:
torchinductor.config.group_fusion = True
if args.torchinductor_enable_batch_fusion:
torchinductor.config.batch_fusion = True

# used for correctness checks, to avoid triton rand() behaving differently from torch rand().
torchinductor.config.fallback_random = bool(args.torchinductor_fallback_random)
Expand Down

0 comments on commit 208068d

Please sign in to comment.