
Commit 1b123ce

crazydemo authored and dominicshanshan committed
[None][fix] fix Llama3 eagle3 test case OOM (NVIDIA#6832)
Signed-off-by: Ivy Zhang <[email protected]>
Signed-off-by: Wangshanshan <[email protected]>
1 parent ab80032 commit 1b123ce
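
The fix combines three memory levers: a capped KV-cache fraction, a bounded runtime batch size, and CUDA-graph capture restricted to batch size 1. As a minimal sketch of how these fit together in the LLM API (import paths and the placeholder directories are assumptions, not taken from this commit; the real values appear in the test diff below):

    # Sketch only: assumes the tensorrt_llm LLM API as exercised in the test below.
    from tensorrt_llm import LLM
    from tensorrt_llm.llmapi import (CudaGraphConfig, EagleDecodingConfig,
                                     KvCacheConfig)

    # Cap the KV cache at 60% of free GPU memory to leave headroom for the
    # EAGLE3 draft model and CUDA-graph capture.
    kv_cache_config = KvCacheConfig(free_gpu_memory_fraction=0.6)

    # EAGLE3 speculative decoding with a 4-token draft window.
    spec_config = EagleDecodingConfig(
        max_draft_len=4,
        speculative_model_dir="<eagle3-draft-model-dir>",  # hypothetical path
        eagle3_one_model=True)

    llm = LLM(
        "<fp8-target-model-dir>",  # hypothetical path
        max_batch_size=16,         # bound runtime batching
        tensor_parallel_size=8,
        speculative_config=spec_config,
        kv_cache_config=kv_cache_config,
        disable_overlap_scheduler=True,
        # Capture CUDA graphs only up to batch size 1 to limit graph memory.
        cuda_graph_config=CudaGraphConfig(max_batch_size=1))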

5 files changed: 15 additions, 11 deletions

tests/integration/defs/accuracy/references/cnn_dailymail.yaml

Lines changed: 2 additions & 1 deletion
@@ -184,7 +184,8 @@ meta-llama/Llama-3.2-3B:
     kv_cache_quant_algo: FP8
     accuracy: 33.629
 meta-llama/Llama-3.3-70B-Instruct:
-  - spec_dec_algo: Eagle
+  - quant_algo: FP8
+    spec_dec_algo: Eagle
     accuracy: 33.244
   - quant_algo: NVFP4
     kv_cache_quant_algo: FP8
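
With the hunk applied, the visible portion of the Llama-3.3-70B-Instruct block reads (reconstructed directly from the diff above):

    meta-llama/Llama-3.3-70B-Instruct:
      - quant_algo: FP8
        spec_dec_algo: Eagle
        accuracy: 33.244
      - quant_algo: NVFP4
        kv_cache_quant_algo: FP8

The Eagle reference now carries an explicit quant_algo: FP8 key, so it matches the FP8-prequantized checkpoint used by the renamed test.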

tests/integration/defs/accuracy/references/mmlu.yaml

Lines changed: 2 additions & 1 deletion
@@ -59,7 +59,8 @@ meta-llama/Llama-3.2-3B:
     accuracy: 60.60
 meta-llama/Llama-3.3-70B-Instruct:
   - accuracy: 81.31
-  - spec_dec_algo: Eagle
+  - quant_algo: FP8
+    spec_dec_algo: Eagle
     accuracy: 81.31
   - quant_algo: NVFP4
     kv_cache_quant_algo: FP8
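
The same restructuring as in cnn_dailymail.yaml; the visible block after the change:

    meta-llama/Llama-3.3-70B-Instruct:
      - accuracy: 81.31
      - quant_algo: FP8
        spec_dec_algo: Eagle
        accuracy: 81.31
      - quant_algo: NVFP4
        kv_cache_quant_algo: FP8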

tests/integration/defs/accuracy/test_llm_api_pytorch.py

Lines changed: 7 additions & 5 deletions
@@ -476,25 +476,27 @@ def test_auto_dtype_tp8(self):
         task.evaluate(llm,
                       extra_evaluator_kwargs=dict(apply_chat_template=True))
 
+    @skip_pre_hopper
     @pytest.mark.skip_less_mpi_world_size(8)
     @parametrize_with_ids("eagle3_one_model", [True, False])
-    def test_eagle3_tp8(self, eagle3_one_model):
-        model_path = f"{llm_models_root()}/llama-3.3-models/Llama-3.3-70B-Instruct"
+    def test_fp8_eagle3_tp8(self, eagle3_one_model):
+        model_path = f"{llm_models_root()}/modelopt-hf-model-hub/Llama-3.3-70B-Instruct-fp8"
         eagle_model_dir = f"{llm_models_root()}/EAGLE3-LLaMA3.3-Instruct-70B"
         kv_cache_config = KvCacheConfig(free_gpu_memory_fraction=0.6)
         spec_config = EagleDecodingConfig(max_draft_len=4,
                                           speculative_model_dir=eagle_model_dir,
                                           eagle3_one_model=eagle3_one_model)
-        pytorch_config = dict(disable_overlap_scheduler=True, )
+        pytorch_config = dict(
+            disable_overlap_scheduler=True,
+            cuda_graph_config=CudaGraphConfig(max_batch_size=1))
         with LLM(model_path,
+                 max_batch_size=16,
                  tensor_parallel_size=8,
                  speculative_config=spec_config,
                  kv_cache_config=kv_cache_config,
                  **pytorch_config) as llm:
             task = CnnDailymail(self.MODEL_NAME)
             task.evaluate(llm)
-            task = MMLU(self.MODEL_NAME)
-            task.evaluate(llm)
 
     @pytest.mark.skip_less_device(4)
     @skip_pre_hopper
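
Taken together, the test changes appear to attack the OOM from three directions: the FP8-prequantized checkpoint roughly halves target-model weight memory relative to BF16, max_batch_size=16 bounds runtime batching, and CudaGraphConfig(max_batch_size=1) restricts CUDA-graph capture to a single batch size, limiting graph memory. Dropping the in-test MMLU pass also shortens the run; the MMLU reference entry above is kept up to date regardless.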

tests/integration/test_lists/qa/llm_function_full.txt

Lines changed: 2 additions & 2 deletions
@@ -460,8 +460,8 @@ accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_guided_decoding_
 accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_guided_decoding_with_ngram[llguidance]
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_tp4
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_nvfp4_tp4
-accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_eagle3_tp8[eagle3_one_model=True]
-accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_eagle3_tp8[eagle3_one_model=False]
+accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=True]
+accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=False]
 accuracy/test_llm_api_pytorch.py::TestMistral7B::test_auto_dtype
 accuracy/test_llm_api_pytorch.py::TestGemma3_1BInstruct::test_auto_dtype
 accuracy/test_llm_api_pytorch.py::TestMistralSmall24B::test_auto_dtype
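
To reproduce one of the renamed cases locally (assuming the integration-test environment, model assets under llm_models_root(), and 8 GPUs), the node IDs above can be passed straight to pytest:

    pytest "tests/integration/defs/accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=True]"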

tests/integration/test_lists/qa/llm_function_sanity.txt

Lines changed: 2 additions & 2 deletions
@@ -67,8 +67,8 @@ accuracy/test_llm_api_pytorch.py::TestLlama3_2_3B::test_auto_dtype
 accuracy/test_llm_api_pytorch.py::TestLlama3_2_3B::test_fp8_prequantized
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_tp4
 accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_nvfp4_tp4
-accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_eagle3_tp8[eagle3_one_model=True]
-accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_eagle3_tp8[eagle3_one_model=False]
+accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=True]
+accuracy/test_llm_api_pytorch.py::TestLlama3_3_70BInstruct::test_fp8_eagle3_tp8[eagle3_one_model=False]
 accuracy/test_llm_api_pytorch.py::TestLlama4MaverickInstruct::test_auto_dtype[tp8-cuda_graph=False]
 accuracy/test_llm_api_pytorch.py::TestLlama4MaverickInstruct::test_auto_dtype[tp8ep4-cuda_graph=True]
 accuracy/test_llm_api_pytorch.py::TestLlama4MaverickInstruct::test_auto_dtype[tp8ep8-cuda_graph=True]
