
Commit 9cb8870

lora hotload and merge
1 parent: d9c8128

3 files changed: +114 −3 lines

diffsynth/lora/flux_lora.py

Lines changed: 59 additions & 1 deletion
@@ -10,4 +10,62 @@ def __init__(self, device="cpu", torch_dtype=torch.float32):
 
     def load(self, model: torch.nn.Module, state_dict_lora, alpha=1.0):
         lora_prefix, model_resource = self.loader.match(model, state_dict_lora)
-        self.loader.load(model, state_dict_lora, lora_prefix, alpha=alpha, model_resource=model_resource)
+        self.loader.load(model, state_dict_lora, lora_prefix, alpha=alpha, model_resource=model_resource)
+
+class LoraMerger(torch.nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.weight_base = torch.nn.Parameter(torch.randn((dim,)))
+        self.weight_lora = torch.nn.Parameter(torch.randn((dim,)))
+        self.weight_cross = torch.nn.Parameter(torch.randn((dim,)))
+        self.weight_out = torch.nn.Parameter(torch.ones((dim,)))
+        self.bias = torch.nn.Parameter(torch.randn((dim,)))
+        self.activation = torch.nn.Sigmoid()
+        self.norm_base = torch.nn.LayerNorm(dim, eps=1e-5)
+        self.norm_lora = torch.nn.LayerNorm(dim, eps=1e-5)
+
+    def forward(self, base_output, lora_outputs):
+        norm_base_output = self.norm_base(base_output)
+        norm_lora_outputs = self.norm_lora(lora_outputs)
+        gate = self.activation(
+            norm_base_output * self.weight_base
+            + norm_lora_outputs * self.weight_lora
+            + norm_base_output * norm_lora_outputs * self.weight_cross + self.bias
+        )
+        output = base_output + (self.weight_out * gate * lora_outputs).sum(dim=0)
+        return output
+
+class LoraPatcher(torch.nn.Module):
+    def __init__(self, lora_patterns=None):
+        super().__init__()
+        if lora_patterns is None:
+            lora_patterns = self.default_lora_patterns()
+        model_dict = {}
+        for lora_pattern in lora_patterns:
+            name, dim = lora_pattern["name"], lora_pattern["dim"]
+            model_dict[name.replace(".", "___")] = LoraMerger(dim)
+        self.model_dict = torch.nn.ModuleDict(model_dict)
+
+    def default_lora_patterns(self):
+        lora_patterns = []
+        lora_dict = {
+            "attn.a_to_qkv": 9216, "attn.a_to_out": 3072, "ff_a.0": 12288, "ff_a.2": 3072, "norm1_a.linear": 18432,
+            "attn.b_to_qkv": 9216, "attn.b_to_out": 3072, "ff_b.0": 12288, "ff_b.2": 3072, "norm1_b.linear": 18432,
+        }
+        for i in range(19):
+            for suffix in lora_dict:
+                lora_patterns.append({
+                    "name": f"blocks.{i}.{suffix}",
+                    "dim": lora_dict[suffix]
+                })
+        lora_dict = {"to_qkv_mlp": 21504, "proj_out": 3072, "norm.linear": 9216}
+        for i in range(38):
+            for suffix in lora_dict:
+                lora_patterns.append({
+                    "name": f"single_blocks.{i}.{suffix}",
+                    "dim": lora_dict[suffix]
+                })
+        return lora_patterns
+
+    def forward(self, base_output, lora_outputs, name):
+        return self.model_dict[name.replace(".", "___")](base_output, lora_outputs)
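The new LoraMerger fuses the output of a base linear layer with the outputs of one or more hot-loaded LoRAs through a learned sigmoid gate, then sums the gated LoRA branches back onto the base output. The snippet below is an illustrative shape check only; the dimension 3072 matches one of the entries in default_lora_patterns, while the batch and sequence sizes are made-up values, and it assumes diffsynth is importable as a package.

# Illustrative shape check for LoraMerger (not part of the commit).
import torch
from diffsynth.lora.flux_lora import LoraMerger

merger = LoraMerger(dim=3072)
base_output = torch.randn(2, 512, 3072)       # output of the base linear layer
lora_outputs = torch.randn(3, 2, 512, 3072)   # 3 hot-loaded LoRAs, stacked along dim 0
merged = merger(base_output, lora_outputs)    # gated sum over the 3 LoRA branches
print(merged.shape)                           # torch.Size([2, 512, 3072])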

diffsynth/pipelines/flux_image_new.py

Lines changed: 41 additions & 1 deletion
@@ -21,7 +21,8 @@
 from ..models.flux_infiniteyou import InfiniteYouImageProjector
 from ..models.tiler import FastTileWorker
 from .wan_video_new import BasePipeline, ModelConfig, PipelineUnitRunner, PipelineUnit
-from ..lora.flux_lora import FluxLoRALoader
+from ..lora.flux_lora import FluxLoRALoader, LoraPatcher
+from ..models.lora import FluxLoRAConverter
 
 from transformers.models.t5.modeling_t5 import T5LayerNorm, T5DenseActDense, T5DenseGatedActDense
 from ..models.flux_dit import RMSNorm
@@ -121,6 +122,45 @@ def load_lora(self, module, path, alpha=1):
         lora = load_state_dict(path, torch_dtype=self.torch_dtype, device=self.device)
         loader.load(module, lora, alpha=alpha)
 
+    def enable_lora_hotload(self, lora_paths):
+        # load lora state dict and align format
+        lora_state_dicts = [
+            FluxLoRAConverter.align_to_diffsynth_format(load_state_dict(path, torch_dtype=self.torch_dtype, device=self.device)) for path in lora_paths
+        ]
+        lora_state_dicts = [l for l in lora_state_dicts if l != {}]
+
+        for name, module in self.dit.named_modules():
+            if isinstance(module, torch.nn.Linear):
+                lora_a_name = f'{name}.lora_A.default.weight'
+                lora_b_name = f'{name}.lora_B.default.weight'
+                lora_A_weights = []
+                lora_B_weights = []
+                for lora_dict in lora_state_dicts:
+                    if lora_a_name in lora_dict and lora_b_name in lora_dict:
+                        lora_A_weights.append(lora_dict[lora_a_name])
+                        lora_B_weights.append(lora_dict[lora_b_name])
+                module.lora_A_weights = lora_A_weights
+                module.lora_B_weights = lora_B_weights
+
+
+    def enable_lora_patcher(self, lora_patcher_path):
+        # load lora patcher
+        lora_patcher = LoraPatcher().to(dtype=self.torch_dtype, device=self.device)
+        lora_patcher.load_state_dict(load_state_dict(lora_patcher_path))
+
+        for name, module in self.dit.named_modules():
+            if isinstance(module, torch.nn.Linear):
+                merger_name = name.replace(".", "___")
+                if merger_name in lora_patcher.model_dict:
+                    module.lora_merger = lora_patcher.model_dict[merger_name]
+
+
+    def off_lora_hotload(self):
+        for name, module in self.dit.named_modules():
+            if isinstance(module, torch.nn.Linear):
+                module.lora_A_weights = []
+                module.lora_B_weights = []
+
 
     def training_loss(self, **inputs):
         timestep_id = torch.randint(0, self.scheduler.num_train_timesteps, (1,))
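Taken together, these methods attach low-rank A/B matrices to every nn.Linear in the DiT without merging them into the base weights, optionally route the stacked LoRA outputs through a trained LoraPatcher, and detach everything again. A hypothetical calling sequence might look like the sketch below; the pipeline object and the file paths are placeholders, not part of this commit.

# Hypothetical usage of the new hotload API (placeholders marked below).
pipe = ...  # an already-constructed FLUX image pipeline exposing the methods above

# Attach one or more LoRAs to every nn.Linear in the DiT without merging weights
pipe.enable_lora_hotload(["lora_style.safetensors", "lora_character.safetensors"])

# Optionally route the stacked LoRA outputs through a trained LoraPatcher
pipe.enable_lora_patcher("lora_patcher.safetensors")

# ... run inference ...

# Detach the hot-loaded LoRA weights again
pipe.off_lora_hotload()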

diffsynth/vram_management/layers.py

Lines changed: 14 additions & 1 deletion
@@ -107,6 +107,9 @@ def __init__(self, module: torch.nn.Linear, offload_dtype, offload_device, onloa
         self.vram_limit = vram_limit
         self.state = 0
         self.name = name
+        self.lora_A_weights = []
+        self.lora_B_weights = []
+        self.lora_merger = None
 
     def forward(self, x, *args, **kwargs):
         if self.state == 2:
@@ -120,7 +123,17 @@ def forward(self, x, *args, **kwargs):
         else:
             weight = cast_to(self.weight, self.computation_dtype, self.computation_device)
             bias = None if self.bias is None else cast_to(self.bias, self.computation_dtype, self.computation_device)
-        return torch.nn.functional.linear(x, weight, bias)
+        out = torch.nn.functional.linear(x, weight, bias)
+        lora_output = []
+        for lora_A, lora_B in zip(self.lora_A_weights, self.lora_B_weights):
+            out_lora = x @ lora_A.T @ lora_B.T
+            if self.lora_merger is None:
+                out = out + out_lora
+            lora_output.append(out_lora)
+        if self.lora_merger is not None and len(lora_output) > 0:
+            lora_output = torch.stack(lora_output)
+            out = self.lora_merger(out, lora_output)
+        return out
 
 
 def enable_vram_management_recursively(model: torch.nn.Module, module_map: dict, module_config: dict, max_num_param=None, overflow_module_config: dict = None, total_num_param=0, vram_limit=None, name_prefix=""):
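In the patched forward, each hot-loaded LoRA contributes x @ lora_A.T @ lora_B.T on top of the base projection, which is mathematically identical to merging lora_B @ lora_A into the weight matrix; keeping the factors separate is what makes loading and unloading cheap, since the base weights are never modified. A small standalone check (toy shapes and rank, not taken from the commit) illustrates the equivalence.

# Standalone check: hot-loaded LoRA matches a merged weight (toy example, not from the commit).
import torch

x = torch.randn(4, 64)            # input batch
weight = torch.randn(128, 64)     # base linear weight (out_features x in_features)
lora_A = torch.randn(8, 64)       # rank-8 down projection
lora_B = torch.randn(128, 8)      # rank-8 up projection

hotloaded = torch.nn.functional.linear(x, weight) + x @ lora_A.T @ lora_B.T
merged = torch.nn.functional.linear(x, weight + lora_B @ lora_A)
print(torch.allclose(hotloaded, merged, atol=1e-4))   # True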
