@@ -91,6 +91,7 @@ async def test_ok(self, faker: faker.Faker, func_request: LLMFuncRequest) -> Non
                 + ChatCompletionsStreamingEvent(choices=[OneStreamingChoice(delta=one_message)]).model_dump_json()
                 for one_message in generated_messages
             )
+            + f"\n\ndata: {ChatCompletionsStreamingEvent(choices=[]).model_dump_json()}"
             + f"\n\ndata: [DONE]\n\ndata: {faker.pystr()}\n\n"
         )
         response: typing.Final = httpx.Response(
@@ -104,23 +105,6 @@ async def test_ok(self, faker: faker.Faker, func_request: LLMFuncRequest) -> Non
 
         assert result == expected_result
 
-    async def test_fails_without_alternatives(self) -> None:
-        response_content: typing.Final = (
-            f"data: {ChatCompletionsStreamingEvent.model_construct(choices=[]).model_dump_json()}\n\n"
-        )
-        response: typing.Final = httpx.Response(
-            200,
-            headers={"Content-Type": "text/event-stream"},
-            content=response_content,
-        )
-        client: typing.Final = any_llm_client.get_client(
-            OpenAIConfigFactory.build(),
-            transport=httpx.MockTransport(lambda _: response),
-        )
-
-        with pytest.raises(LLMResponseValidationError):
-            await consume_llm_message_chunks(client.stream_llm_message_chunks(**LLMFuncRequestFactory.build()))
-
 
 class TestOpenAILLMErrors:
     @pytest.mark.parametrize("stream", [True, False])
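
Taken together, the two hunks change what the tests expect from a streaming event with an empty choices list: test_ok now includes such an event in the happy-path SSE stream (and still expects result == expected_result), while the dedicated test_fails_without_alternatives test that expected LLMResponseValidationError is removed. Below is a minimal, self-contained sketch of that "skip empty-choices events" behavior; the dataclasses and the iter_message_chunks helper are simplified stand-ins invented for illustration, not any_llm_client's actual pydantic models or API.

# Illustration only: simplified stand-ins, not the library's real models.
import dataclasses
import typing


@dataclasses.dataclass
class OneStreamingChoice:
    delta: str


@dataclasses.dataclass
class ChatCompletionsStreamingEvent:
    choices: list[OneStreamingChoice]


def iter_message_chunks(
    events: typing.Iterable[ChatCompletionsStreamingEvent],
) -> typing.Iterator[str]:
    """Yield message deltas, skipping events without choices instead of raising."""
    for event in events:
        if not event.choices:  # tolerated, like the empty-choices event added to test_ok
            continue
        yield event.choices[0].delta


chunks = list(
    iter_message_chunks(
        [
            ChatCompletionsStreamingEvent(choices=[OneStreamingChoice(delta="Hello, ")]),
            ChatCompletionsStreamingEvent(choices=[]),
            ChatCompletionsStreamingEvent(choices=[OneStreamingChoice(delta="world!")]),
        ]
    )
)
assert "".join(chunks) == "Hello, world!"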