
Commit 930723f

update logic
1 parent 1b4df4d commit 930723f

2 files changed: +29 additions, −41 deletions

pydantic_ai_slim/pydantic_ai/models/openai.py

Lines changed: 14 additions & 17 deletions
@@ -935,9 +935,11 @@ async def _responses_create(
             tool_choice = 'required'
         else:
             tool_choice = 'auto'
+
         previous_response_id = model_settings.get('openai_previous_response_id')
         if previous_response_id == 'auto':
             messages, previous_response_id = self._get_response_id_and_trim(messages)
+
         instructions, openai_messages = await self._map_messages(messages)
         reasoning = self._get_reasoning(model_settings)

@@ -1049,26 +1051,21 @@ def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam
         }
 
     def _get_response_id_and_trim(self, messages: list[ModelMessage]) -> tuple[list[ModelMessage], str | None]:
-        # If the message history contains only openai responses,
-        # we can limit the history to the most recent ModelRequest.
-        # The provider_response_id from the latest ModelResponse is
-        # then passed as previous_response_id to preserve context.
+        # In `auto` mode, the history is trimmed up to (but not including)
+        # the latest ModelResponse with a valid `provider_response_id`.
+        # This is then passed as `previous_response_id` in the next request
+        # to maintain context along with the trimmed history.
         response_id = None
-        latest_model_request: ModelRequest | None = None
-        for m in messages:
-            # Openai may return a dated model_name that differs from self.model_name
-            # (e.g., "gpt-5" vs "gpt-5-2025-08-07").
-            if isinstance(m, ModelResponse) and m.model_name and (self.model_name in m.model_name):
+        trimmed_messages: list[ModelMessage] = []
+        for m in reversed(messages):
+            if isinstance(m, ModelResponse) and m.provider_name == self.system:
                 response_id = m.provider_response_id
-            elif isinstance(m, ModelRequest):
-                latest_model_request = m
-            else:
-                # Mixed model responses invalidate response_id,
-                # so the history is kept intact.
-                response_id = None
                 break
-        if response_id and latest_model_request:
-            return [latest_model_request], response_id
+            else:
+                trimmed_messages.append(m)
+
+        if response_id and trimmed_messages:
+            return list(reversed(trimmed_messages)), response_id
         else:
             return messages, None
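For orientation, the new `_get_response_id_and_trim` walks the history newest-first: every message after the most recent response from this provider is kept verbatim, and that response's `provider_response_id` is returned so the server-side response state can stand in for the trimmed prefix. Below is a minimal standalone sketch of the same scan, using simplified stand-in types rather than pydantic_ai's real `ModelRequest`/`ModelResponse`; the names and values are illustrative only.

    from __future__ import annotations

    from dataclasses import dataclass

    # Illustrative stand-ins for pydantic_ai's ModelRequest / ModelResponse.
    @dataclass
    class Request:
        content: str

    @dataclass
    class Response:
        content: str
        provider_name: str
        provider_response_id: str | None = None

    def trim_history(messages: list, system: str = 'openai') -> tuple[list, str | None]:
        # Walk newest-first; stop at the first response from this provider
        # and reuse its id in place of everything up to and including it.
        response_id = None
        trimmed: list = []
        for m in reversed(messages):
            if isinstance(m, Response) and m.provider_name == system:
                response_id = m.provider_response_id
                break
            else:
                trimmed.append(m)
        if response_id and trimmed:
            return list(reversed(trimmed)), response_id
        return messages, None

    history = [
        Request('The first secret key is sesame'),
        Response('Noted.', 'openai', 'resp_123'),
        Request('The second secret key is olives'),
    ]
    trimmed, prev_id = trim_history(history)
    # Only the trailing request is re-sent; 'resp_123' carries the earlier turns.
    assert prev_id == 'resp_123'
    assert trimmed == [Request('The second secret key is olives')]

Compared with the old version, matching on `provider_name == self.system` sidesteps the dated-model-name substring check entirely, and a foreign-provider response no longer aborts trimming outright: if it sits after the latest OpenAI response, it simply stays in the retained tail.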

tests/models/test_openai_responses.py

Lines changed: 15 additions & 24 deletions
@@ -1192,6 +1192,21 @@ async def test_openai_previous_response_id_auto_mode(allow_model_requests: None,
 async def test_openai_previous_response_id_mixed_model_history(allow_model_requests: None, openai_api_key: str):
     """Test if invalid previous response id is ignored when history contains non-OpenAI responses"""
     history = [
+        # ModelRequest(
+        #     parts=[
+        #         UserPromptPart(
+        #             content='The first secret key is sesame',
+        #         ),
+        #     ],
+        # ),
+        # ModelResponse(
+        #     parts=[
+        #         TextPart(content='Open sesame! What would you like to unlock?'),
+        #     ],
+        #     model_name='gpt-5',
+        #     provider_name='openai',
+        #     provider_response_id='resp_68b9bd97025c8195b443af591ca2345c08cb6072affe6099',
+        # ),
         ModelRequest(
             parts=[
                 UserPromptPart(

@@ -1203,21 +1218,6 @@ async def test_openai_previous_response_id_mixed_model_reque
             parts=[
                 TextPart(content='Open sesame! What would you like to unlock?'),
             ],
-            model_name='gpt-5',
-            provider_name='openai',
-            provider_response_id='resp_68b9bd97025c8195b443af591ca2345c08cb6072affe6099',
-        ),
-        ModelRequest(
-            parts=[
-                UserPromptPart(
-                    content='The second secret key is olives',
-                ),
-            ],
-        ),
-        ModelResponse(
-            parts=[
-                TextPart(content='Understood'),
-            ],
             model_name='claude-3-5-sonnet-latest',
             provider_name='anthropic',
             provider_response_id='msg_01XUQuedGz9gusk4xZm4gWJj',

@@ -1240,15 +1240,6 @@ async def test_openai_previous_response_id_mixed_model_reque
         ModelResponse(
             parts=[TextPart(content='Open sesame! What would you like to unlock?')],
             usage=RequestUsage(),
-            model_name='gpt-5',
-            timestamp=IsDatetime(),
-            provider_name='openai',
-            provider_response_id='resp_68b9bd97025c8195b443af591ca2345c08cb6072affe6099',
-        ),
-        ModelRequest(parts=[UserPromptPart(content='The second secret key is olives', timestamp=IsDatetime())]),
-        ModelResponse(
-            parts=[TextPart(content='Understood')],
-            usage=RequestUsage(),
             model_name='claude-3-5-sonnet-latest',
             timestamp=IsDatetime(),
             provider_name='anthropic',
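With the earlier OpenAI turns reduced to comments, the live history is now an Anthropic-only exchange. Under the new reverse scan no response with a matching `provider_name` is ever found, so `response_id` stays `None` and the history is sent intact, which is what the trimmed expectations above assert. Continuing the stand-in sketch from the first file (the response id here is hypothetical):

    mixed = [
        Request('The first secret key is sesame'),
        Response('Open sesame! What would you like to unlock?', 'anthropic', 'msg_01X'),
    ]
    trimmed, prev_id = trim_history(mixed)
    # No OpenAI response anywhere in the history: nothing is trimmed.
    assert prev_id is None
    assert trimmed == mixed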
