
Commit 2b4a12b

Reduce Glm4v model test size significantly (#39173)
* fix test size
* Update test_modeling_glm4v.py
1 parent e355c0a commit 2b4a12b

File tree: 1 file changed (+8, −14 lines)


tests/models/glm4v/test_modeling_glm4v.py

Lines changed: 8 additions & 14 deletions
@@ -69,16 +69,15 @@ def __init__(
         is_training=True,
         text_config={
             "vocab_size": 99,
-            "hidden_size": 32,
-            "intermediate_size": 37,
-            "num_hidden_layers": 4,
-            "num_attention_heads": 4,
-            "num_key_value_heads": 2,
+            "hidden_size": 16,
+            "intermediate_size": 22,
+            "num_hidden_layers": 2,
+            "num_attention_heads": 2,
+            "num_key_value_heads": 1,
             "output_channels": 64,
             "hidden_act": "silu",
             "max_position_embeddings": 512,
             "rope_scaling": {"type": "default", "mrope_section": [2, 1, 1]},
-            "max_window_layers": 3,
             "rope_theta": 10000,
             "tie_word_embeddings": True,
             "bos_token_id": 0,
@@ -87,11 +86,10 @@ def __init__(
         },
         vision_config={
             "depth": 2,
-            "embed_dim": 32,
             "hidden_act": "silu",
-            "hidden_size": 32,
-            "mlp_ratio": 4,
-            "num_heads": 4,
+            "hidden_size": 48,
+            "out_hidden_size": 16,
+            "intermediate_size": 22,
             "patch_size": 14,
             "spatial_merge_size": 1,
             "temporal_patch_size": 2,
@@ -239,10 +237,6 @@ def test_sdpa_can_dispatch_on_flash(self):
     def test_multi_gpu_data_parallel_forward(self):
         pass
 
-    @unittest.skip(reason="We cannot configure to output a smaller model.")
-    def test_model_is_small(self):
-        pass
-
     @unittest.skip("Error with compilation")
     def test_generate_from_inputs_embeds_with_static_cache(self):
         pass
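
Note on the dropped skip: the common test suite's `ModelTesterMixin.test_model_is_small` asserts that each tester model stays under roughly one million parameters, and with the reduced `text_config`/`vision_config` above the GLM-4V tester now fits, so the blanket `@unittest.skip` can go. The snippet below is a minimal sketch of that bound, not part of this commit; it assumes a `transformers` release that ships `Glm4vConfig` and `Glm4vForConditionalGeneration` and that `Glm4vConfig` accepts nested config dicts, with the config values copied from the diff above.

```python
# Illustrative sketch only: build GLM-4V from the reduced test config and
# verify the parameter count stays under the ~1M bound that the common
# test_model_is_small check enforces. Class names and the nested-dict
# config handling are assumed from the transformers release this test targets.
from transformers import Glm4vConfig, Glm4vForConditionalGeneration

config = Glm4vConfig(
    text_config={
        "vocab_size": 99,
        "hidden_size": 16,
        "intermediate_size": 22,
        "num_hidden_layers": 2,
        "num_attention_heads": 2,
        "num_key_value_heads": 1,
        "output_channels": 64,
        "hidden_act": "silu",
        "max_position_embeddings": 512,
        "rope_scaling": {"type": "default", "mrope_section": [2, 1, 1]},
        "rope_theta": 10000,
        "tie_word_embeddings": True,
        "bos_token_id": 0,
    },
    vision_config={
        "depth": 2,
        "hidden_act": "silu",
        "hidden_size": 48,
        "out_hidden_size": 16,
        "intermediate_size": 22,
        "patch_size": 14,
        "spatial_merge_size": 1,
        "temporal_patch_size": 2,
    },
)

model = Glm4vForConditionalGeneration(config)
num_params = sum(p.numel() for p in model.parameters())
print(f"{num_params:,} parameters")
assert num_params < 1_000_000  # the bound test_model_is_small enforces
```

Since parameter count scales multiplicatively with width and depth, halving `hidden_size`, the layer count, and the head count on the text side (and slimming the vision tower) shrinks the tester model far more than the raw config deltas suggest.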
