Commit 9207c43 (1 parent: 836cf85)
Showing 14 changed files with 423 additions and 0 deletions.
xfuser/model_executor/plugins/first_block_cache/diffusers_adapters/__init__.py (18 additions & 0 deletions)
@@ -0,0 +1,18 @@
import importlib

from diffusers import DiffusionPipeline


def apply_fbcache_on_transformer(transformer, *args, **kwargs):
    transformer_cls_name = transformer.__class__.__name__
    if False:
        pass
    elif transformer_cls_name.startswith("Flux") or transformer_cls_name.startswith("xFuserFlux"):
        adapter_name = "flux"
    else:
        raise ValueError(f"Unknown transformer class name: {transformer_cls_name}")

    adapter_module = importlib.import_module(f".{adapter_name}", __package__)
    apply_cache_on_transformer_fn = getattr(adapter_module, "apply_cache_on_transformer")
    return apply_cache_on_transformer_fn(transformer, *args, **kwargs)
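The dispatch above is purely name-based: any transformer whose class name starts with "Flux" or "xFuserFlux" resolves to the flux adapter submodule, and anything else raises a ValueError. A toy sketch of that behavior (illustrative classes only, not xfuser code):

# Illustrative only: mimics the class-name dispatch in apply_fbcache_on_transformer.
class FluxTransformer2DModel: ...
class SomeOtherTransformer2DModel: ...

def pick_adapter(transformer) -> str:
    name = transformer.__class__.__name__
    if name.startswith("Flux") or name.startswith("xFuserFlux"):
        return "flux"
    raise ValueError(f"Unknown transformer class name: {name}")

print(pick_adapter(FluxTransformer2DModel()))    # -> flux
# pick_adapter(SomeOtherTransformer2DModel())    # -> ValueError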
xfuser/model_executor/plugins/first_block_cache/diffusers_adapters/flux.py (56 additions & 0 deletions)
@@ -0,0 +1,56 @@
import functools
import unittest

import torch
from diffusers import DiffusionPipeline, FluxTransformer2DModel

from xfuser.model_executor.plugins.first_block_cache import utils


def apply_cache_on_transformer(
    transformer: FluxTransformer2DModel,
    *,
    rel_l1_thresh=0.6,
    use_cache=True,
    return_hidden_states_first=False,
):
    cached_transformer_blocks = torch.nn.ModuleList(
        [
            utils.FBCachedTransformerBlocks(
                transformer.transformer_blocks,
                transformer.single_transformer_blocks,
                transformer=transformer,
                rel_l1_thresh=rel_l1_thresh,
                return_hidden_states_first=return_hidden_states_first,
                enable_fbcache=use_cache,
            )
        ]
    )
    dummy_single_transformer_blocks = torch.nn.ModuleList()

    original_forward = transformer.forward

    @functools.wraps(original_forward)
    def new_forward(
        self,
        *args,
        **kwargs,
    ):
        with unittest.mock.patch.object(
            self,
            "transformer_blocks",
            cached_transformer_blocks,
        ), unittest.mock.patch.object(
            self,
            "single_transformer_blocks",
            dummy_single_transformer_blocks,
        ):
            return original_forward(
                *args,
                **kwargs,
            )

    transformer.forward = new_forward.__get__(transformer)

    return transformer
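For context, a hypothetical usage sketch (not part of this commit): it assumes a stock diffusers FluxPipeline whose transformer is a FluxTransformer2DModel; the checkpoint id and prompt are placeholders.

# Hypothetical usage, not from this commit: wrap a Flux pipeline's transformer
# with the first-block cache, then run inference as usual.
import torch
from diffusers import FluxPipeline

from xfuser.model_executor.plugins.first_block_cache.diffusers_adapters import (
    apply_fbcache_on_transformer,
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # placeholder checkpoint id
    torch_dtype=torch.bfloat16,
).to("cuda")

# Patches pipe.transformer.forward in place; a higher rel_l1_thresh means more
# steps reuse the cached residuals (faster, at some cost in fidelity).
apply_fbcache_on_transformer(pipe.transformer, rel_l1_thresh=0.6)

image = pipe("a photo of a cat", num_inference_steps=28).images[0]
image.save("flux_fbcache.png")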
xfuser/model_executor/plugins/first_block_cache/utils.py (113 additions & 0 deletions)
@@ -0,0 +1,113 @@
import contextlib
import dataclasses
from collections import defaultdict
from typing import DefaultDict, Dict
from xfuser.core.distributed import (
    get_sp_group,
    get_sequence_parallel_world_size,
)

import torch


@dataclasses.dataclass
class CacheContext:
    first_hidden_states_residual: torch.Tensor = None
    hidden_states_residual: torch.Tensor = None
    encoder_hidden_states_residual: torch.Tensor = None

    def clear_buffers(self):
        self.first_hidden_states_residual = None
        self.hidden_states_residual = None
        self.encoder_hidden_states_residual = None


class FBCachedTransformerBlocks(torch.nn.Module):
    def __init__(
        self,
        transformer_blocks,
        single_transformer_blocks=None,
        *,
        transformer=None,
        rel_l1_thresh=0.6,
        return_hidden_states_first=True,
        enable_fbcache=True,
    ):
        super().__init__()
        self.transformer = transformer
        self.transformer_blocks = transformer_blocks
        self.single_transformer_blocks = single_transformer_blocks
        self.rel_l1_thresh = rel_l1_thresh
        self.return_hidden_states_first = return_hidden_states_first
        self.enable_fbcache = enable_fbcache
        self.cache_context = CacheContext()

    def forward(self, hidden_states, encoder_hidden_states, *args, **kwargs):
        if not self.enable_fbcache:
            # the branch to disable cache
            for block in self.transformer_blocks:
                hidden_states, encoder_hidden_states = block(hidden_states, encoder_hidden_states, *args, **kwargs)
                if not self.return_hidden_states_first:
                    hidden_states, encoder_hidden_states = encoder_hidden_states, hidden_states
            if self.single_transformer_blocks is not None:
                hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
                for block in self.single_transformer_blocks:
                    hidden_states = block(hidden_states, *args, **kwargs)
                hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :]
            return (
                (hidden_states, encoder_hidden_states)
                if self.return_hidden_states_first
                else (encoder_hidden_states, hidden_states)
            )

        # run first block of transformer
        original_hidden_states = hidden_states
        first_transformer_block = self.transformer_blocks[0]
        hidden_states, encoder_hidden_states = first_transformer_block(
            hidden_states, encoder_hidden_states, *args, **kwargs
        )
        if not self.return_hidden_states_first:
            hidden_states, encoder_hidden_states = encoder_hidden_states, hidden_states
        first_hidden_states_residual = hidden_states - original_hidden_states
        del original_hidden_states

        prev_first_hidden_states_residual = self.cache_context.first_hidden_states_residual

        if prev_first_hidden_states_residual is None:
            use_cache = False
        else:
            mean_diff = (first_hidden_states_residual - prev_first_hidden_states_residual).abs().mean()
            mean_t1 = prev_first_hidden_states_residual.abs().mean()
            if get_sequence_parallel_world_size() > 1:
                mean_diff = get_sp_group().all_gather(mean_diff.unsqueeze(0)).mean()
                mean_t1 = get_sp_group().all_gather(mean_t1.unsqueeze(0)).mean()
            diff = mean_diff / mean_t1
            use_cache = diff < self.rel_l1_thresh

        if use_cache:
            del first_hidden_states_residual
            hidden_states += self.cache_context.hidden_states_residual
            encoder_hidden_states += self.cache_context.encoder_hidden_states_residual
        else:
            original_hidden_states = hidden_states
            original_encoder_hidden_states = encoder_hidden_states
            self.cache_context.first_hidden_states_residual = first_hidden_states_residual
            for block in self.transformer_blocks[1:]:
                hidden_states, encoder_hidden_states = block(hidden_states, encoder_hidden_states, *args, **kwargs)
                if not self.return_hidden_states_first:
                    hidden_states, encoder_hidden_states = encoder_hidden_states, hidden_states
            if self.single_transformer_blocks is not None:
                hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
                for block in self.single_transformer_blocks:
                    hidden_states = block(hidden_states, *args, **kwargs)
                encoder_hidden_states, hidden_states = hidden_states.split(
                    [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1
                )
            self.cache_context.hidden_states_residual = hidden_states - original_hidden_states
            self.cache_context.encoder_hidden_states_residual = encoder_hidden_states - original_encoder_hidden_states

        return (
            (hidden_states, encoder_hidden_states)
            if self.return_hidden_states_first
            else (encoder_hidden_states, hidden_states)
        )
xfuser/model_executor/plugins/teacache/diffusers_adapters/__init__.py (17 additions & 0 deletions)
@@ -0,0 +1,17 @@
import importlib

from diffusers import DiffusionPipeline


def apply_teacache_on_transformer(transformer, *args, **kwargs):
    transformer_cls_name = transformer.__class__.__name__
    if False:
        pass
    elif transformer_cls_name.startswith("Flux") or transformer_cls_name.startswith("xFuserFlux"):
        adapter_name = "flux"
    else:
        raise ValueError(f"Unknown transformer class name: {transformer_cls_name}")

    adapter_module = importlib.import_module(f".{adapter_name}", __package__)
    apply_cache_on_transformer_fn = getattr(adapter_module, "apply_cache_on_transformer")
    return apply_cache_on_transformer_fn(transformer, *args, **kwargs)
xfuser/model_executor/plugins/teacache/diffusers_adapters/flux.py (59 additions & 0 deletions)
@@ -0,0 +1,59 @@
import functools
import unittest

import torch
from diffusers import DiffusionPipeline, FluxTransformer2DModel

from xfuser.model_executor.plugins.teacache import utils


def apply_cache_on_transformer(
    transformer: FluxTransformer2DModel,
    *,
    rel_l1_thresh=0.6,
    use_cache=True,
    num_steps=8,
    return_hidden_states_first=False,
    coefficients=[4.98651651e+02, -2.83781631e+02, 5.58554382e+01, -3.82021401e+00, 2.64230861e-01],
):
    cached_transformer_blocks = torch.nn.ModuleList(
        [
            utils.TeaCachedTransformerBlocks(
                transformer.transformer_blocks,
                transformer.single_transformer_blocks,
                transformer=transformer,
                enable_teacache=use_cache,
                num_steps=num_steps,
                rel_l1_thresh=rel_l1_thresh,
                return_hidden_states_first=return_hidden_states_first,
                coefficients=coefficients,
            )
        ]
    )
    dummy_single_transformer_blocks = torch.nn.ModuleList()

    original_forward = transformer.forward

    @functools.wraps(original_forward)
    def new_forward(
        self,
        *args,
        **kwargs,
    ):
        with unittest.mock.patch.object(
            self,
            "transformer_blocks",
            cached_transformer_blocks,
        ), unittest.mock.patch.object(
            self,
            "single_transformer_blocks",
            dummy_single_transformer_blocks,
        ):
            return original_forward(
                *args,
                **kwargs,
            )

    transformer.forward = new_forward.__get__(transformer)

    return transformer
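The TeaCache adapter is applied the same way as the first-block-cache one; only the dispatcher and the sampling-related arguments differ. A hypothetical call, reusing the pipe object from the fbcache sketch above (not part of this commit):

# Hypothetical usage, not from this commit: num_steps should match the sampler's
# step count; coefficients keep the defaults from the signature above.
from xfuser.model_executor.plugins.teacache.diffusers_adapters import (
    apply_teacache_on_transformer,
)

apply_teacache_on_transformer(
    pipe.transformer,
    rel_l1_thresh=0.6,
    num_steps=28,
)

image = pipe("a photo of a cat", num_inference_steps=28).images[0]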