Skip to content

Commit bd0c0d7

Browse files
authored
Reduce more memories on free_gpu_mem option (#1915)
* Enhance free_gpu_mem option: unload cond_stage_model when the free_gpu_mem option is set * Enhance free_gpu_mem option: unload cond_stage_model when the free_gpu_mem option is set
1 parent f745f78 commit bd0c0d7

File tree

3 files changed

+17
-3
lines changed

3 files changed

+17
-3
lines changed

ldm/generate.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -453,6 +453,11 @@ def process_image(image,seed):
453453
init_image = None
454454
mask_image = None
455455

456+
457+
if self.free_gpu_mem and self.model.cond_stage_model.device != self.model.device:
458+
self.model.cond_stage_model.device = self.model.device
459+
self.model.cond_stage_model.to(self.model.device)
460+
456461
try:
457462
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
458463
prompt, model =self.model,

ldm/invoke/generator/txt2img.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
import numpy as np
77
from ldm.invoke.generator.base import Generator
88
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
9+
import gc
910

1011

1112
class Txt2Img(Generator):
@@ -55,7 +56,11 @@ def make_image(x_T):
5556
)
5657

5758
if self.free_gpu_mem:
58-
self.model.model.to("cpu")
59+
self.model.model.to('cpu')
60+
self.model.cond_stage_model.device = 'cpu'
61+
self.model.cond_stage_model.to('cpu')
62+
gc.collect()
63+
torch.cuda.empty_cache()
5964

6065
return self.sample_to_image(samples)
6166

ldm/invoke/generator/txt2img2img.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,11 @@ def make_image(x_T):
100100
)
101101

102102
if self.free_gpu_mem:
103-
self.model.model.to("cpu")
103+
self.model.model.to('cpu')
104+
self.model.cond_stage_model.device = 'cpu'
105+
self.model.cond_stage_model.to('cpu')
106+
gc.collect()
107+
torch.cuda.empty_cache()
104108

105109
return self.sample_to_image(samples)
106110

@@ -142,7 +146,7 @@ def inpaint_make_image(x_T):
142146
**kwargs
143147
)
144148
return result[0][0]
145-
149+
146150
if sampler.uses_inpainting_model():
147151
return inpaint_make_image
148152
else:

0 commit comments

Comments (0)