[tests] unbloat tests/lora/utils.py
#11845
base: main
Changes from all commits: db03fc9, 27fe7c5, fc88ac4, fb18269, be922ae, 66f922c, 705a9fd, 395f9d9, 9fd5c9a, 1d6aa8a, e56b8d7, 6cf2933, f86ccac, 810726c, f72ada1, 1200b24, 39a2784, 36bc333, b48fde5, fc6fb85, fe1af35, 9445c4b, fd084dd, 47a747c
@@ -120,11 +120,25 @@ def get_dummy_inputs(self, with_generator=True):

         return noise, input_ids, pipeline_inputs

-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
-
-    def test_simple_inference_with_text_denoiser_lora_unfused(self):
-        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
+        super()._test_lora_set_adapters_scenarios(scenario, expected_atol=9e-3)
+
+    @parameterized.expand(
+        [
+            # Test actions on text_encoder LoRA only
+            ("fused", "text_encoder_only"),
+            ("unloaded", "text_encoder_only"),
+            ("save_load", "text_encoder_only"),
+            # Test actions on both text_encoder and denoiser LoRA
+            ("fused", "text_and_denoiser"),
+            ("unloaded", "text_and_denoiser"),
+            ("unfused", "text_and_denoiser"),
+            ("save_load", "text_and_denoiser"),
+        ]
+    )
+    def test_lora_actions(self, action, components_to_add):
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)

     def test_lora_scale_kwargs_match_fusion(self):
         super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)

Review comment on the @parameterized.expand([("simple",), ...]) line: Why do the scenarios have to be single element tuples? And is
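For what it's worth, `parameterized.expand` does not strictly require single-element tuples: in the commonly used `parameterized` package it also accepts bare values for one-parameter cases and wraps them itself. A minimal sketch of both spellings; the test class and method names here are illustrative, not from the PR:

```python
import unittest

from parameterized import parameterized


class ScenarioNamingExample(unittest.TestCase):
    # Single-element tuples, as written in the PR.
    @parameterized.expand([("simple",), ("weighted",)])
    def test_with_tuples(self, scenario):
        self.assertIn(scenario, {"simple", "weighted"})

    # Bare values are also accepted for single-parameter tests;
    # parameterized wraps each non-tuple value into a one-argument call.
    @parameterized.expand(["simple", "weighted"])
    def test_with_bare_values(self, scenario):
        self.assertIn(scenario, {"simple", "weighted"})


if __name__ == "__main__":
    unittest.main()
```

Either way, the generated test names embed the parameter (e.g. `test_with_tuples_0_simple`), which keeps the scenarios selectable with `pytest -k`.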
@@ -136,38 +150,8 @@ def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
         super()._test_group_offloading_inference_denoiser(offload_type, use_stream)

-    @unittest.skip("Not supported in CogVideoX.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in CogVideoX.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
-    @unittest.skip("Not supported in CogVideoX.")
-    def test_modify_padding_mode(self):
-        pass
-
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass
-
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
-    def test_simple_inference_with_text_lora(self):
-        pass
-
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
-    def test_simple_inference_with_text_lora_and_scale(self):
-        pass
-
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
-    def test_simple_inference_with_text_lora_fused(self):
-        pass
-
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
-    def test_simple_inference_with_text_lora_save_load(self):
-        pass
-
-    @unittest.skip("Not supported in CogVideoX.")
-    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
-        pass
+    # TODO: skip them properly

Review comment on the "# TODO: skip them properly" line: I think this comment, on its own, is not very helpful in explaining what it is that needs to be done here.
Reply: These are skipped appropriately from the parent method. I think it's okay in this case, because it eases things a bit.
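To make the reply concrete: a plausible reading of "skipped from the parent method" is that the shared helpers in tests/lora/utils.py check what the pipeline under test supports and raise `unittest.SkipTest` themselves, so subclasses like the CogVideoX tests no longer need stub overrides. A hypothetical sketch of that pattern, assuming a capability flag; `PeftLoraLoaderMixinTests` and `_test_lora_actions` appear in the repo and diff, but the flag name and helper body here are illustrative:

```python
import unittest


class PeftLoraLoaderMixinTests:
    # Hypothetical capability flag: pipelines without text encoder LoRA
    # support (e.g. CogVideoX) would set this to False in their subclass.
    supports_text_encoder_lora = True

    def _test_lora_actions(self, action, components_to_add, expected_atol=1e-3):
        # Skip centrally instead of every subclass overriding tests with `pass`.
        if components_to_add == "text_encoder_only" and not self.supports_text_encoder_lora:
            raise unittest.SkipTest("Text encoder LoRA is not supported by this pipeline.")
        # ... shared fuse/unload/save_load assertions would follow here ...
```

With something like this in place, a model test file shrinks to one flag assignment plus the `@parameterized.expand` overrides shown in the first hunk, which is exactly the "unbloating" the PR title describes.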