
Commit 85bfc39

Minor Chroma Radiance cleanups
1 parent d2e7b33 commit 85bfc39

File tree

2 files changed: 5 additions & 4 deletions


comfy/ldm/chroma/layers_dct.py

Lines changed: 3 additions & 0 deletions
@@ -6,6 +6,7 @@
 
 from comfy.ldm.flux.layers import RMSNorm
 
+
 class NerfEmbedder(nn.Module):
     """
     An embedder module that combines input features with a 2D positional
@@ -130,6 +131,7 @@ def forward(self, inputs: torch.Tensor, embedder_dtype: torch.dtype) -> torch.Tensor:
         # No-op if already the same dtype.
         return inputs.to(dtype=orig_dtype)
 
+
 class NerfGLUBlock(nn.Module):
     """
     A NerfBlock using a Gated Linear Unit (GLU) like MLP.
@@ -182,6 +184,7 @@ def forward(self, x):
         # So we temporarily move the channel dimension to the end for the norm operation.
         return self.linear(self.norm(x.movedim(1, -1))).movedim(-1, 1)
 
+
 class NerfFinalLayerConv(nn.Module):
     def __init__(self, hidden_size, out_channels, dtype=None, device=None, operations=None):
         super().__init__()
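
For context on the classes these hunks touch: NerfGLUBlock's docstring calls it "a NerfBlock using a Gated Linear Unit (GLU) like MLP". A minimal sketch of that general pattern, assuming a SiLU gate (illustrative only, not ComfyUI's actual NerfGLUBlock implementation):

import torch
from torch import nn

class GLUMLP(nn.Module):
    """Toy GLU-style MLP: one projection yields a value half and a gate
    half, and the gate modulates the value path before projecting back."""
    def __init__(self, dim, hidden):
        super().__init__()
        self.up = nn.Linear(dim, hidden * 2)  # value and gate in one matmul
        self.down = nn.Linear(hidden, dim)

    def forward(self, x):
        value, gate = self.up(x).chunk(2, dim=-1)
        return self.down(value * nn.functional.silu(gate))

# Shape is preserved: (batch, tokens, dim) in and out.
print(GLUMLP(64, 128)(torch.randn(1, 16, 64)).shape)  # torch.Size([1, 16, 64])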

comfy/ldm/chroma/model_dct.py

Lines changed: 2 additions & 4 deletions
@@ -10,10 +10,7 @@
 from einops import repeat
 import comfy.ldm.common_dit
 
-from comfy.ldm.flux.layers import (
-    EmbedND,
-    timestep_embedding,
-)
+from comfy.ldm.flux.layers import EmbedND
 
 from .layers import (
     DoubleStreamBlock,
@@ -29,6 +26,7 @@
 
 from . import model as chroma_model
 
+
 @dataclass
 class ChromaRadianceParams(chroma_model.ChromaParams):
     patch_size: int
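
The second hunk only adds PEP 8 spacing above the params dataclass, but it shows the pattern ChromaRadianceParams uses: a @dataclass that extends a base params dataclass and appends its own field. A minimal sketch of that pattern with hypothetical field names (not ComfyUI's actual ChromaParams definition):

from dataclasses import dataclass

@dataclass
class BaseParams:
    in_channels: int   # hypothetical base field
    hidden_size: int   # hypothetical base field

@dataclass
class RadianceParams(BaseParams):
    patch_size: int    # subclass fields are appended after the base fields

# The generated __init__ takes base fields first, then subclass fields.
p = RadianceParams(in_channels=64, hidden_size=3072, patch_size=2)
print(p.patch_size)  # 2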
