Skip to content

Commit e338d29

Browse files
committed
cr fixes 3
1 parent 1eee328 commit e338d29

File tree

3 files changed

+293
-64
lines changed

3 files changed

+293
-64
lines changed

invokeai/app/invocations/bria_decoder.py renamed to invokeai/app/invocations/bria_latents_to_image.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,14 +9,14 @@
99

1010

1111
@invocation(
12-
"bria_decoder",
13-
title="Decoder - Bria",
12+
"bria_latents_to_image",
13+
title="Latents to Image - Bria",
1414
tags=["image", "bria"],
1515
category="image",
1616
version="1.0.0",
1717
classification=Classification.Prototype,
1818
)
19-
class BriaDecoderInvocation(BaseInvocation):
19+
class BriaLatentsToImageInvocation(BaseInvocation):
2020
"""
2121
Decode Bria latents to an image.
2222
"""

invokeai/backend/bria/pipeline_bria_controlnet.py

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -228,8 +228,6 @@ def get_control_start_end(self, control_guidance_start, control_guidance_end):
228228
def __call__(
229229
self,
230230
prompt: Union[str, List[str]] = None,
231-
height: Optional[int] = None,
232-
width: Optional[int] = None,
233231
num_inference_steps: int = 30,
234232
timesteps: List[int] = None,
235233
guidance_scale: float = 3.5,
@@ -259,10 +257,6 @@ def __call__(
259257
prompt (`str` or `List[str]`, *optional*):
260258
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
261259
instead.
262-
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
263-
The height in pixels of the generated image. This is set to 1024 by default for the best results.
264-
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
265-
The width in pixels of the generated image. This is set to 1024 by default for the best results.
266260
num_inference_steps (`int`, *optional*, defaults to 50):
267261
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
268262
expense of slower inference.
@@ -323,8 +317,6 @@ def __call__(
323317
`tuple`. When returning a tuple, the first element is a list with the generated images.
324318
"""
325319

326-
height = height or self.default_sample_size * self.vae_scale_factor
327-
width = width or self.default_sample_size * self.vae_scale_factor
328320
control_guidance_start, control_guidance_end = self.get_control_start_end(
329321
control_guidance_start=control_guidance_start, control_guidance_end=control_guidance_end
330322
)
@@ -335,8 +327,6 @@ def __call__(
335327
)
336328
self.check_inputs(
337329
prompt,
338-
height,
339-
width,
340330
negative_prompt=negative_prompt,
341331
prompt_embeds=prompt_embeds,
342332
negative_prompt_embeds=negative_prompt_embeds,
@@ -517,7 +507,7 @@ def __call__(
517507
order=1,
518508
total_steps=num_inference_steps,
519509
timestep=int(t),
520-
latents=latents.view(1, 64, 64, 4, 2, 2).permute(0, 3, 1, 4, 2, 5).reshape(1, 4, 128, 128),
510+
latents=self._unpack_latents(latents, height, width, self.vae_scale_factor),
521511
),
522512
)
523513

0 commit comments

Comments (0)