1 parent 735bb4b commit 5828607
comfy/model_management.py
@@ -340,7 +340,7 @@ def is_amd():
 
 PRIORITIZE_FP16 = False # TODO: remove and replace with something that shows exactly which dtype is faster than the other
 try:
-    if is_nvidia() and PerformanceFeature.Fp16Accumulation in args.fast:
+    if (is_nvidia() or is_amd()) and PerformanceFeature.Fp16Accumulation in args.fast:
         torch.backends.cuda.matmul.allow_fp16_accumulation = True
         PRIORITIZE_FP16 = True # TODO: limit to cards where it actually boosts performance
         logging.info("Enabled fp16 accumulation.")
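
Context for the change: torch.backends.cuda.matmul.allow_fp16_accumulation tells PyTorch to accumulate fp16 matmul products in fp16 rather than fp32, which is faster on some GPUs at some cost in numerical precision. ROCm builds of PyTorch expose the same torch.backends.cuda flags, so the gate can be widened from is_nvidia() to AMD as well. Below is a minimal standalone sketch of the gated enable, not ComfyUI code: the helper name try_enable_fp16_accumulation and its fast_features argument are hypothetical stand-ins for args.fast and PerformanceFeature, and the hasattr guard covers PyTorch builds that predate the flag.

import torch

def try_enable_fp16_accumulation(fast_features):
    # Hypothetical helper; mirrors the gated enable in the diff above.
    if "fp16_accumulation" not in fast_features:
        return False
    matmul = torch.backends.cuda.matmul
    # Older PyTorch releases do not expose this flag, so guard with hasattr
    # instead of letting the assignment raise.
    if not hasattr(matmul, "allow_fp16_accumulation"):
        return False
    matmul.allow_fp16_accumulation = True
    return True

if try_enable_fp16_accumulation({"fp16_accumulation"}):
    print("Enabled fp16 accumulation.")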