diff --git a/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py b/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py
index f0cf0bfc..8b69b478 100755
--- a/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py
+++ b/genai-perf/genai_perf/profile_data_parser/llm_profile_data_parser.py
@@ -524,9 +524,15 @@ def _extract_openai_chat_text_output(self, response: str) -> str:
             # https://platform.openai.com/docs/api-reference/completions
             return completions.get("text", "")
         elif obj_type == "chat.completion":  # non-streaming
-            return completions["message"].get("content", "")
+            if completions["message"].get("reasoning_content", None) is not None:  # vLLM model with reasoning parser enabled
+                return completions["message"].get("reasoning_content", "")
+            else:
+                return completions["message"].get("content", "")
         elif obj_type == "chat.completion.chunk":  # streaming
-            return completions["delta"].get("content", "")
+            if completions["delta"].get("reasoning_content", None) is not None:  # vLLM model with reasoning parser enabled
+                return completions["delta"].get("reasoning_content", "")
+            else:
+                return completions["delta"].get("content", "")
         else:
             raise ValueError(f"Unknown OpenAI response object type '{obj_type}'.")
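
A minimal, self-contained sketch of the extraction behavior this patch introduces, assuming OpenAI-style chat completion payloads plus vLLM's optional "reasoning_content" field. The standalone helper extract_text_output below is a hypothetical stand-in for the parser method, not the actual LLMProfileDataParser code.

import json


def extract_text_output(response: str) -> str:
    # Hypothetical standalone version of the patched extraction logic.
    data = json.loads(response)
    completions = data["choices"][0]
    obj_type = data["object"]

    if obj_type == "text_completion":  # legacy completions API
        return completions.get("text", "")
    elif obj_type == "chat.completion":  # non-streaming
        message = completions["message"]
        # Prefer reasoning_content when a vLLM reasoning parser is enabled.
        if message.get("reasoning_content") is not None:
            return message.get("reasoning_content", "")
        return message.get("content", "")
    elif obj_type == "chat.completion.chunk":  # streaming
        delta = completions["delta"]
        if delta.get("reasoning_content") is not None:
            return delta.get("reasoning_content", "")
        return delta.get("content", "")
    raise ValueError(f"Unknown OpenAI response object type '{obj_type}'.")


# Example payloads: a non-streaming response carrying reasoning_content,
# and a streaming chunk without it, which falls back to "content".
non_streaming = json.dumps({
    "object": "chat.completion",
    "choices": [{"message": {"reasoning_content": "step-by-step...", "content": "final answer"}}],
})
chunk = json.dumps({
    "object": "chat.completion.chunk",
    "choices": [{"delta": {"content": "partial text"}}],
})

assert extract_text_output(non_streaming) == "step-by-step..."
assert extract_text_output(chunk) == "partial text"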