 import math
 import torch
 from torch import nn
-from .ldm.modules.attention import CrossAttention
-from inspect import isfunction
+from .ldm.modules.attention import CrossAttention, FeedForward
 import comfy.ops
 ops = comfy.ops.manual_cast

-def exists(val):
-    return val is not None
-
-
-def uniq(arr):
-    return{el: True for el in arr}.keys()
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-# feedforward
-class GEGLU(nn.Module):
-    def __init__(self, dim_in, dim_out):
-        super().__init__()
-        self.proj = ops.Linear(dim_in, dim_out * 2)
-
-    def forward(self, x):
-        x, gate = self.proj(x).chunk(2, dim=-1)
-        return x * torch.nn.functional.gelu(gate)
-
-
-class FeedForward(nn.Module):
-    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
-        super().__init__()
-        inner_dim = int(dim * mult)
-        dim_out = default(dim_out, dim)
-        project_in = nn.Sequential(
-            ops.Linear(dim, inner_dim),
-            nn.GELU()
-        ) if not glu else GEGLU(dim, inner_dim)
-
-        self.net = nn.Sequential(
-            project_in,
-            nn.Dropout(dropout),
-            ops.Linear(inner_dim, dim_out)
-        )
-
-    def forward(self, x):
-        return self.net(x)
-

 class GatedCrossAttentionDense(nn.Module):
     def __init__(self, query_dim, context_dim, n_heads, d_head):
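For reference, a minimal sketch of how the imported FeedForward is expected to stand in for the local definitions deleted above. It assumes the shared class keeps the same constructor signature as the removed one (dim, dim_out=None, mult=4, glu=False, dropout=0.) and that glu=True selects the GEGLU projection; the absolute import path and the example dimensions are illustrative assumptions, not part of this commit.

import torch
# Assumed absolute form of the relative import used in the diff above
from comfy.ldm.modules.attention import FeedForward

# glu=True picks the GEGLU branch, covering the GEGLU class removed in this commit
ff = FeedForward(dim=320, glu=True)
x = torch.randn(2, 77, 320)   # (batch, tokens, dim)
out = ff(x)                   # dim_out defaults to dim, so the last dimension is preserved
print(out.shape)              # torch.Size([2, 77, 320])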