Commit 654ba1d

Tabrizian authored and yuanjingx87 committed
[https://nvbugs/5451296][bug] Fix a thread leak in test_llm_args.py (#7017)
Signed-off-by: Iman Tabrizian <[email protected]>
1 parent: 2502901 · commit: 654ba1d

File tree: 1 file changed (+14 −17 lines)


tests/unittest/llmapi/test_llm_args.py

Lines changed: 14 additions & 17 deletions
@@ -426,23 +426,20 @@ class TestTorchLlmArgs:
 
     @print_traceback_on_error
     def test_runtime_sizes(self):
-        llm = TorchLLM(
-            llama_model_path,
-            max_beam_width=1,
-            max_num_tokens=256,
-            max_seq_len=128,
-            max_batch_size=8,
-        )
-
-        assert llm.args.max_beam_width == 1
-        assert llm.args.max_num_tokens == 256
-        assert llm.args.max_seq_len == 128
-        assert llm.args.max_batch_size == 8
-
-        assert llm._executor_config.max_beam_width == 1
-        assert llm._executor_config.max_num_tokens == 256
-        assert llm._executor_config.max_seq_len == 128
-        assert llm._executor_config.max_batch_size == 8
+        with TorchLLM(llama_model_path,
+                      max_beam_width=1,
+                      max_num_tokens=256,
+                      max_seq_len=128,
+                      max_batch_size=8) as llm:
+            assert llm.args.max_beam_width == 1
+            assert llm.args.max_num_tokens == 256
+            assert llm.args.max_seq_len == 128
+            assert llm.args.max_batch_size == 8
+
+            assert llm._executor_config.max_beam_width == 1
+            assert llm._executor_config.max_num_tokens == 256
+            assert llm._executor_config.max_seq_len == 128
+            assert llm._executor_config.max_batch_size == 8
 
     def test_dynamic_setattr(self):
         with pytest.raises(pydantic_core._pydantic_core.ValidationError):
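Why this fixes the thread leak: the old test constructed TorchLLM directly and never shut it down, so if the test returned (or an assert failed) the engine's worker thread outlived the test. Wrapping the construction in a with-block guarantees cleanup on block exit, even on assertion failure. Below is a minimal, self-contained sketch of that pattern; WorkerHandle, its _stop event, and shutdown() are hypothetical stand-ins for TorchLLM's executor, not the real API.

    import threading

    class WorkerHandle:
        """Hypothetical stand-in for an object that owns a worker thread."""

        def __init__(self):
            self._stop = threading.Event()
            # A non-daemon thread like this keeps the process alive (leaks)
            # unless shutdown() is called before the handle is dropped.
            self._thread = threading.Thread(target=self._stop.wait)
            self._thread.start()

        def shutdown(self):
            # Signal the worker to exit, then join it.
            self._stop.set()
            self._thread.join()

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            # Runs even when an assert inside the with-block fails,
            # so the thread is always joined.
            self.shutdown()
            return False  # do not swallow exceptions

    with WorkerHandle() as handle:
        assert handle is not None
    # Worker thread joined here; nothing leaks even on assertion failure.

The old test body only reached cleanup if every assert passed; the context-manager form moves that responsibility into __exit__, which is what the +14/−17 hunk above does for TorchLLM.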

Comments (0)