Skip to content

Commit f84dd64

Browse files
authored
[None][infra] Waive failed tests on main branch 8/20 (NVIDIA#7092)
Signed-off-by: qqiao <[email protected]>
1 parent b95cab2 commit f84dd64

File tree

3 files changed

+3
-0
lines changed

3 files changed

+3
-0
lines changed

tests/integration/test_lists/waives.txt

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -319,3 +319,4 @@ disaggregated/test_disaggregated.py::test_disaggregated_diff_max_tokens[TinyLlam
319319
disaggregated/test_disaggregated.py::test_disaggregated_deepseek_v3_lite_fp8_tp1_single_gpu_mtp[DeepSeek-V3-Lite-fp8] SKIP (https://nvbugs/5465642)
320320
examples/test_multimodal.py::test_llm_multimodal_general[Mistral-Small-3.1-24B-Instruct-2503-pp:1-tp:1-bfloat16-bs:1-cpp_e2e:False-nb:1] SKIP (https://nvbugs/5431146)
321321
accuracy/test_llm_api_pytorch.py::TestDeepSeekR1::test_fp8_blockscale[latency] SKIP (https://nvbugs/5464461)
322+
disaggregated/test_disaggregated.py::test_disaggregated_benchmark_on_diff_backends[DeepSeek-V3-Lite-fp8] SKIP (https://nvbugs/5448449)

tests/unittest/_torch/auto_deploy/unit/singlegpu/test_ad_trtllm_bench.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -600,6 +600,7 @@ def test_trtllm_bench(llm_root): # noqa: F811
600600
run_benchmark(model_name, dataset_path, temp_dir)
601601

602602

603+
@pytest.mark.skip(reason="https://nvbugs/5458798")
603604
@pytest.mark.no_xdist
604605
def test_trtllm_bench_backend_comparison(llm_root): # noqa: F811
605606
"""Test that compares autodeploy backend performance against pytorch backend

tests/unittest/_torch/modules/test_fused_moe.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -289,6 +289,7 @@ def per_rank_test_fused_moe_alltoall(job_id):
289289
assert r is None
290290

291291

292+
@pytest.mark.skip(reason="https://nvbugs/5467531")
292293
@pytest.mark.skipif(torch.cuda.device_count() < 4,
293294
reason="needs 4 GPUs to run this test")
294295
@pytest.mark.parametrize("alltoall_method_type", [

0 commit comments

Comments (0)