1 parent ea3e0ee commit 77fa5df
tests/unittest/_torch/auto_deploy/unit/multigpu/custom_ops/test_mxfp4_moe_ep.py
@@ -5,12 +5,11 @@
 import torch.distributed as dist
 from _dist_test_utils import get_device_counts
 
+from tensorrt_llm._torch.auto_deploy.custom_ops.fused_moe.mxfp4_moe import (
+    IS_TRITON_KERNELS_AVAILABLE,
+)
 from tensorrt_llm._torch.auto_deploy.distributed.common import spawn_multiprocess_job
 
-# FIXME: https://nvbugspro.nvidia.com/bug/5604136.
-# from tensorrt_llm._torch.auto_deploy.custom_ops.mxfp4_moe import IS_TRITON_KERNELS_AVAILABLE
-IS_TRITON_KERNELS_AVAILABLE = False
-
 
 def _split_range_last_remainder(n: int, world_size: int, rank: int):
     """[lo, hi) split along dim0; last rank gets remainder."""
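The substance of the change is that IS_TRITON_KERNELS_AVAILABLE is now imported from the fused_moe.mxfp4_moe custom-op module instead of being hardcoded to False while the linked bug was open. A typical way such a flag gates a test module is as a pytest skip guard; the sketch below is a hypothetical illustration, not code from this commit, and the test name and reason string are assumptions:

import pytest

from tensorrt_llm._torch.auto_deploy.custom_ops.fused_moe.mxfp4_moe import (
    IS_TRITON_KERNELS_AVAILABLE,
)

# Skip rather than fail on machines where the Triton MoE kernels are
# not installed, so the multi-GPU suite stays green elsewhere.
@pytest.mark.skipif(
    not IS_TRITON_KERNELS_AVAILABLE,
    reason="Triton kernels are not available in this environment",
)
def test_mxfp4_moe_ep():  # hypothetical test name for illustration
    ...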