Skip to content

Commit 44d84e4

Browse files
committed
refactor: enhance attention backend assignment in SkyReelsV2AttnProcessor
1 parent 043952a commit 44d84e4

File tree

1 file changed

+2
-2
lines changed

src/diffusers/models/transformers/transformer_skyreels_v2.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
2525
from ...utils.torch_utils import maybe_allow_in_graph
2626
from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward
27-
from ..attention_dispatch import dispatch_attention_fn
27+
from ..attention_dispatch import dispatch_attention_fn, AttentionBackendName
2828
from ..cache_utils import CacheMixin
2929
from ..embeddings import (
3030
PixArtAlphaTextProjection,
@@ -126,7 +126,7 @@ def apply_rotary_emb(
126126
if not attn.is_cross_attention:
127127
attention_backend = None
128128
else:
129-
attention_backend = "flash_varlen"
129+
attention_backend = AttentionBackendName("flash_varlen")
130130

131131
# I2V task
132132
hidden_states_img = None

Comments (0)