Commit 735bb4b (parent bf2a1b5)

Users report gfx1201 is buggy on flux with pytorch attention. (Comfy-Org#9244)

File tree: 1 file changed (+3 −3)

comfy/model_management.py

@@ -321,9 +321,9 @@ def is_amd():
         if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
             if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950
                 ENABLE_PYTORCH_ATTENTION = True
-        if torch_version_numeric >= (2, 8):
-            if any((a in arch) for a in ["gfx1201"]):
-                ENABLE_PYTORCH_ATTENTION = True
+        # if torch_version_numeric >= (2, 8):
+        #     if any((a in arch) for a in ["gfx1201"]):
+        #         ENABLE_PYTORCH_ATTENTION = True
         if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
             if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches
                 SUPPORT_FP8_OPS = True
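
For readers following the change: the gate keys off the GPU architecture string that ROCm builds of PyTorch expose on the device properties object, and this commit removes gfx1201 from the allow-list rather than changing the detection itself. Below is a minimal standalone sketch of the same pattern. pytorch_attention_ok is a hypothetical helper, not ComfyUI's API, and it assumes a ROCm build of PyTorch where gcnArchName is present (hedged with getattr so other builds fall through safely):

    import torch

    def pytorch_attention_ok(device_index=0):
        """Sketch of the arch gate in the diff; not ComfyUI's actual API."""
        if not torch.cuda.is_available():
            return False
        props = torch.cuda.get_device_properties(device_index)
        # gcnArchName is only present on ROCm builds of PyTorch (assumption),
        # so use getattr to fall back to an empty string elsewhere.
        arch = getattr(props, "gcnArchName", "")
        # Major/minor only; ignores suffixes like "+rocm6.4".
        torch_version = tuple(int(p) for p in torch.__version__.split(".")[:2])
        # Mirror of the gate above: known-good arches only. gfx1201 is
        # deliberately excluded after the Flux reports in Comfy-Org#9244.
        if torch_version >= (2, 7):
            if any(a in arch for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]):
                return True
        return False

Note that this auto-detection only sets the default: ComfyUI's --use-pytorch-attention launch flag forces the backend on, so gfx1201 users who want to test PyTorch attention can still opt in manually.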
