16
16
from pydantic_ai .exceptions import ModelHTTPError , ModelRetry
17
17
from pydantic_ai .messages import (
18
18
BinaryContent ,
19
+ DocumentUrl ,
19
20
ImageUrl ,
20
21
ModelRequest ,
21
22
ModelResponse ,
@@ -118,12 +119,12 @@ async def chat_completions_create( # pragma: lax no cover
118
119
119
120
120
121
def completion_message(
    message: MistralAssistantMessage, *, usage: MistralUsageInfo | None = None, created: int = 1704067200
) -> MistralChatCompletionResponse:
    """Build a canned Mistral chat-completion response wrapping *message*, for use as a mock reply.

    Args:
        message: The assistant message returned as the single choice of the response.
        usage: Optional token-usage info; defaults to 1 prompt/1 completion/1 total token.
        created: Unix timestamp stamped on the response; defaults to 1704067200
            (2024-01-01 00:00:00 UTC) so snapshot tests see a stable timestamp.

    Returns:
        A `MistralChatCompletionResponse` with fixed id/model/object fields.
    """
    return MistralChatCompletionResponse(
        id='123',
        choices=[MistralChatCompletionChoice(finish_reason='stop', index=0, message=message)],
        created=created,
        model='mistral-large-123',
        object='chat.completion',
        usage=usage or MistralUsageInfo(prompt_tokens=1, completion_tokens=1, total_tokens=1),
    )
@@ -142,7 +143,7 @@ def chunk(
142
143
MistralCompletionResponseStreamChoice (index = index , delta = delta , finish_reason = finish_reason )
143
144
for index , delta in enumerate (delta )
144
145
],
145
- created = 1704067200 if with_created else None , # 2024-01-01
146
+ created = 1704067200 , # 2024-01-01
146
147
model = 'gpt-4' ,
147
148
object = 'chat.completion.chunk' ,
148
149
usage = MistralUsageInfo (prompt_tokens = 1 , completion_tokens = 1 , total_tokens = 1 ),
@@ -187,13 +188,20 @@ def test_init():
187
188
188
189
189
190
async def test_multiple_completions (allow_model_requests : None ):
191
+ from datetime import datetime , timezone
192
+
190
193
completions = [
194
+ # First completion: created is "now" (simulate IsNow)
191
195
completion_message (
192
196
MistralAssistantMessage (content = 'world' ),
193
197
usage = MistralUsageInfo (prompt_tokens = 1 , completion_tokens = 1 , total_tokens = 1 ),
194
- with_created = False ,
198
+ created = int (datetime .now (tz = timezone .utc ).timestamp ()),
199
+ ),
200
+ # Second completion: created is fixed 2024-01-01 00:00:00 UTC
201
+ completion_message (
202
+ MistralAssistantMessage (content = 'hello again' ),
203
+ created = int (datetime (2024 , 1 , 1 , 0 , 0 , tzinfo = timezone .utc ).timestamp ()),
195
204
),
196
- completion_message (MistralAssistantMessage (content = 'hello again' )),
197
205
]
198
206
mock_client = MockMistralAI .create_mock (completions )
199
207
model = MistralModel ('mistral-large-latest' , provider = MistralProvider (mistral_client = mock_client ))
@@ -1909,6 +1917,72 @@ async def test_image_as_binary_content_input(allow_model_requests: None):
1909
1917
)
1910
1918
1911
1919
1920
async def test_pdf_url_input(allow_model_requests: None):
    """A `DocumentUrl` pointing at a PDF is accepted and echoed verbatim in the message history."""
    # Mocked Mistral client always replies with a single 'world' assistant message.
    c = completion_message(MistralAssistantMessage(content='world', role='assistant'))
    mock_client = MockMistralAI.create_mock(c)
    m = MistralModel('mistral-large-latest', provider=MistralProvider(mistral_client=mock_client))
    agent = Agent(m)

    result = await agent.run(
        [
            'hello',
            DocumentUrl(url='https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf'),
        ]
    )
    # The request must carry the prompt parts unchanged; the response comes from the mock above.
    assert result.all_messages() == snapshot(
        [
            ModelRequest(
                parts=[
                    UserPromptPart(
                        content=[
                            'hello',
                            DocumentUrl(url='https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf'),
                        ],
                        timestamp=IsDatetime(),
                    )
                ]
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
                usage=Usage(requests=1, request_tokens=1, response_tokens=1, total_tokens=1),
                model_name='mistral-large-123',
                timestamp=IsDatetime(),
                vendor_id='123',
            ),
        ]
    )
1954
+
1955
+
1956
async def test_pdf_as_binary_content_input(allow_model_requests: None):
    """`BinaryContent` with media_type='application/pdf' is accepted and echoed in the history."""
    # Mocked Mistral client always replies with a single 'world' assistant message.
    c = completion_message(MistralAssistantMessage(content='world', role='assistant'))
    mock_client = MockMistralAI.create_mock(c)
    m = MistralModel('mistral-large-latest', provider=MistralProvider(mistral_client=mock_client))
    agent = Agent(m)

    # Tiny PDF-like byte payload; the client is mocked, so presumably only the
    # media_type (not the bytes) matters here — confirm against the model mapping code.
    base64_content = b'%PDF-1.\r trailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>>>>>>>>'

    result = await agent.run(['hello', BinaryContent(data=base64_content, media_type='application/pdf')])
    # The request must carry the prompt parts unchanged; the response comes from the mock above.
    assert result.all_messages() == snapshot(
        [
            ModelRequest(
                parts=[
                    UserPromptPart(
                        content=['hello', BinaryContent(data=base64_content, media_type='application/pdf')],
                        timestamp=IsDatetime(),
                    )
                ]
            ),
            ModelResponse(
                parts=[TextPart(content='world')],
                usage=Usage(requests=1, request_tokens=1, response_tokens=1, total_tokens=1),
                model_name='mistral-large-123',
                timestamp=IsDatetime(),
                vendor_id='123',
            ),
        ]
    )
1984
+
1985
+
1912
1986
async def test_audio_as_binary_content_input (allow_model_requests : None ):
1913
1987
c = completion_message (MistralAssistantMessage (content = 'world' , role = 'assistant' ))
1914
1988
mock_client = MockMistralAI .create_mock (c )
@@ -1917,7 +1991,7 @@ async def test_audio_as_binary_content_input(allow_model_requests: None):
1917
1991
1918
1992
base64_content = b'//uQZ'
1919
1993
1920
- with pytest .raises (RuntimeError , match = 'Only image binary content is supported for Mistral.' ):
1994
+ with pytest .raises (RuntimeError , match = 'BinaryContent other than image or PDF is not supported in Mistral.' ):
1921
1995
await agent .run (['hello' , BinaryContent (data = base64_content , media_type = 'audio/wav' )])
1922
1996
1923
1997
0 commit comments