Commit 8e9ccd9

fix CI bug when using mocker
Signed-off-by: bhsueh <[email protected]>
1 parent 4541655 · commit 8e9ccd9

File tree

1 file changed: +3 -3 lines changed
tests/integration/defs/accuracy/test_llm_api_pytorch.py

Lines changed: 3 additions & 3 deletions
@@ -2736,7 +2736,7 @@ def test_w4_1gpu(self, moe_backend, cuda_graph, overlap_scheduler, mocker):
 
         with llm:
             model_name = "GPT-OSS/MXFP4"
-            mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN": 8192})
+            mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
             task = GSM8K(model_name)
             task.evaluate(llm,
                           extra_evaluator_kwargs=self.extra_evaluator_kwargs)
@@ -2776,7 +2776,7 @@ def test_w4_4gpus(self, moe_backend, tp_size, pp_size, ep_size,
         with llm:
             model_name = "GPT-OSS/MXFP4"
             task = GSM8K(model_name)
-            mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN": 8192})
+            mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
             task.evaluate(llm,
                           extra_evaluator_kwargs=self.extra_evaluator_kwargs)
 
@@ -2807,7 +2807,7 @@ def test_w4a16(self, tp_size, pp_size, ep_size, attention_dp, cuda_graph,
         with llm:
             model_name = "GPT-OSS/BF16"
             task = GSM8K(model_name)
-            mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN": 8192})
+            mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
             task.evaluate(llm,
                           extra_evaluator_kwargs=self.extra_evaluator_kwargs)
 
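For context: mocker.patch.object (from pytest-mock, a thin wrapper over unittest.mock.patch.object) takes the target object, the attribute name as a string, and the replacement value as separate arguments. The old code passed a dict where the attribute name belongs, so the call raised a TypeError when the test ran, which is what broke CI. A minimal sketch of the corrected pattern, using a stand-in GSM8K class rather than the real evaluator from the test file:

# Minimal sketch of the pattern this commit fixes (pytest + pytest-mock).
# GSM8K here is a hypothetical stand-in, not the real evaluator; only the
# MAX_OUTPUT_LEN attribute mirrors the test file above.

class GSM8K:
    MAX_OUTPUT_LEN = 4096  # hypothetical default value


def test_patch_max_output_len(mocker):
    # Correct form: patch.object(target, attribute_name, new_value).
    # The attribute name must be a string; pytest-mock undoes the patch
    # automatically when the test finishes.
    mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
    assert GSM8K.MAX_OUTPUT_LEN == 8192

    # Old, buggy form: the dict lands where the attribute *name* belongs,
    # so unittest.mock raises a TypeError instead of patching anything:
    # mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN": 8192})

Passing the new value as a separate positional argument (rather than wrapping it in a dict) is the documented unittest.mock signature; patch.dict is the distinct helper for patching dict-like objects, which may be where the original form came from.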