diff --git a/docs/models/openai.md b/docs/models/openai.md
index 12b7fd659b..aceb9f0daf 100644
--- a/docs/models/openai.md
+++ b/docs/models/openai.md
@@ -202,6 +202,131 @@ print(result2.output)
 #> This is an excellent joke invented by Samuel Colvin, it needs no explanation.
 ```
+### Freeform Function Calling
+
+With freeform function calling, GPT-5 can send raw text payloads (anything from Python scripts to SQL queries) to your custom tool without wrapping the data in JSON. This differs from classic structured function calls and gives you greater flexibility when interacting with external runtimes such as:
+
+* Code execution sandboxes (Python, C++, Java, ...)
+* SQL databases
+* Shell environments
+* Configuration generators
+
+Note that freeform function calling does NOT support parallel tool calling.
+
+You can enable freeform function calling for a tool using the `text_format` parameter when creating your tool. To use it, the tool must take a single string argument (besides the optional `RunContext`) and the model must be one of the GPT-5 models used with `OpenAIResponsesModel`. For example:
+
+```python
+from pydantic_ai import Agent
+from pydantic_ai.models.openai import OpenAIResponsesModel
+
+model = OpenAIResponsesModel('gpt-5')  # (1)!
+agent = Agent(model)
+
+@agent.tool_plain(text_format='plain')  # (2)!
+def freeform_tool(sql: str): ...
+```
+
+1. The GPT-5 family (`gpt-5`, `gpt-5-mini`, `gpt-5-nano`) all support freeform function calling.
+2. Model classes that do not support freeform function calling (such as `OpenAIChatModel`) ignore `text_format` and invoke the tool in the normal way. `OpenAIResponsesModel` raises a `UserError` if the model does not support freeform function calling or the tool does not take a single string argument.
+
+You can read more about this function calling style in the [OpenAI cookbook](https://cookbook.openai.com/examples/gpt-5/gpt-5_new_params_and_tools#2-freeform-function-calling).
+
+#### Context-Free Grammar
+
+A tool that queries a SQL database can only accept valid SQL. For situations like this, GPT-5's freeform function calling can constrain the generated text using a context-free grammar, so that only valid SQL is produced.
+
+A context-free grammar is a collection of production rules that define which strings belong to a language. Each rule rewrites a non-terminal symbol into a sequence of terminals (literal tokens) and/or other non-terminals, independent of the surrounding context (hence "context-free"). CFGs can capture the syntax of most programming languages and, in OpenAI custom tools, serve as contracts that force the model to emit only strings that the grammar accepts.
+
+##### Regular Expression
+
+The grammar can be written either as a regular expression:
+
+```python
+from pydantic_ai import Agent, RegexTextFormat
+from pydantic_ai.models.openai import OpenAIResponsesModel
+
+model = OpenAIResponsesModel('gpt-5')  # (1)!
+agent = Agent(model)
+
+timestamp_pattern = r'^\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]) (?:[01]\d|2[0-3]):[0-5]\d$'
+
+@agent.tool_plain(text_format=RegexTextFormat(pattern=timestamp_pattern))  # (2)!
+def timestamp_accepting_tool(timestamp: str): ...
+```
+
+1. The GPT-5 family (`gpt-5`, `gpt-5-mini`, `gpt-5-nano`) all support freeform function calling with context-free grammar constraints, although `gpt-5-nano` often struggles with these calls.
+2. Model classes that do not support freeform function calling (such as `OpenAIChatModel`) ignore `text_format` and invoke the tool in the normal way, which may lead to input that does not match the pattern.
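+
+Because the constraint here is an ordinary regular expression, you can sanity-check it locally before handing it to the model. A minimal sketch using the pattern from the example above (the sample timestamps are illustrative):
+
+```python
+import re
+
+timestamp_pattern = r'^\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]) (?:[01]\d|2[0-3]):[0-5]\d$'
+
+pattern = re.compile(timestamp_pattern)
+
+# A string the model is allowed to produce:
+assert pattern.match('2024-06-30 23:59')
+# Strings the pattern rules out (month 13, hour 24):
+assert not pattern.match('2024-13-01 10:00')
+assert not pattern.match('2024-06-30 24:00')
+```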
+
+##### Lark
+
+Or as a [Lark](https://lark-parser.readthedocs.io/en/latest/how_to_use.html) grammar:
+
+```python
+from pydantic_ai import Agent, LarkTextFormat
+from pydantic_ai.models.openai import OpenAIResponsesModel
+
+model = OpenAIResponsesModel('gpt-5')  # (1)!
+agent = Agent(model)
+
+timestamp_grammar_definition = r'''
+start: timestamp
+
+timestamp: YEAR "-" MONTH "-" DAY " " HOUR ":" MINUTE
+
+%import common.DIGIT
+
+YEAR: DIGIT DIGIT DIGIT DIGIT
+MONTH: /(0[1-9]|1[0-2])/
+DAY: /(0[1-9]|[12]\d|3[01])/
+HOUR: /([01]\d|2[0-3])/
+MINUTE: /[0-5]\d/
+'''
+
+@agent.tool_plain(text_format=LarkTextFormat(definition=timestamp_grammar_definition))  # (2)!
+def i_like_iso_dates(date: str): ...
+```
+
+1. The GPT-5 family (`gpt-5`, `gpt-5-mini`, `gpt-5-nano`) all support freeform function calling with context-free grammar constraints, although `gpt-5-nano` often struggles with these calls.
+2. Model classes that do not support freeform function calling (such as `OpenAIChatModel`) ignore `text_format` and invoke the tool in the normal way, which may lead to input that does not match the grammar.
+
+GPT-5 only supports grammars up to a certain complexity, so it is important to test your grammar.
+
+Freeform function calling, with or without a context-free grammar, can also be used with the agent's output tool:
+
+```python
+from pydantic_ai import Agent, LarkTextFormat
+from pydantic_ai.models.openai import OpenAIResponsesModel
+from pydantic_ai.output import ToolOutput
+
+sql_grammar_definition = r'''
+start: select_stmt
+select_stmt: "SELECT" select_list "FROM" table ("WHERE" condition ("AND" condition)*)?
+select_list: "*" | column ("," column)*
+table: "users" | "orders"
+column: "id" | "user_id" | "name" | "age"
+condition: column ("=" | ">" | "<") (NUMBER | STRING)
+%import common.NUMBER
+%import common.ESCAPED_STRING -> STRING
+%import common.WS
+%ignore WS
+'''  # (1)!
+
+output_tool = ToolOutput(str, text_format=LarkTextFormat(definition=sql_grammar_definition))
+model = OpenAIResponsesModel('gpt-5')
+agent = Agent(model, output_type=output_tool)
+```
+
+1. A complete SQL grammar would be quite extensive, so this simplified version is used instead. You can find an example SQL grammar [in the OpenAI cookbook](https://cookbook.openai.com/examples/gpt-5/gpt-5_new_params_and_tools#33-example---sql-dialect--ms-sql-vs-postgresql), and there are more example grammars in the [Lark repo](https://github.com/lark-parser/lark/blob/master/examples/composition/json.lark). Remember that a simpler grammar that matches your DDL will be easier for GPT-5 to work with and will result in fewer semantically invalid results.
+
+##### Best Practices
+
+You can find recommended best practices in the [OpenAI Cookbook](https://cookbook.openai.com/examples/gpt-5/gpt-5_new_params_and_tools#35-best-practices).
+
+* [Lark Docs](https://lark-parser.readthedocs.io/en/stable/)
+* [Lark IDE](https://www.lark-parser.org/ide/)
+* [OpenAI Cookbook on CFG](https://cookbook.openai.com/examples/gpt-5/gpt-5_new_params_and_tools#3-contextfree-grammar-cfg)
+
 ## OpenAI-compatible Models
 
 Many providers and models are compatible with the OpenAI API, and can be used with `OpenAIChatModel` in Pydantic AI.
diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py index 8f6254f425..37bdcfa0b5 100644 --- a/pydantic_ai_slim/pydantic_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/__init__.py @@ -90,7 +90,18 @@ ) from .run import AgentRun, AgentRunResult, AgentRunResultEvent from .settings import ModelSettings -from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied +from .tools import ( + DeferredToolRequests, + DeferredToolResults, + LarkTextFormat, + RegexTextFormat, + RunContext, + TextFormat, + Tool, + ToolApproved, + ToolDefinition, + ToolDenied, +) from .toolsets import ( AbstractToolset, ApprovalRequiredToolset, @@ -191,6 +202,9 @@ 'DeferredToolResults', 'ToolApproved', 'ToolDenied', + 'TextFormat', + 'RegexTextFormat', + 'LarkTextFormat', # toolsets 'AbstractToolset', 'ApprovalRequiredToolset', diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py index cc05b18898..d6fdf24104 100644 --- a/pydantic_ai_slim/pydantic_ai/_output.py +++ b/pydantic_ai_slim/pydantic_ai/_output.py @@ -31,7 +31,7 @@ ToolOutput, _OutputSpecItem, # type: ignore[reportPrivateUsage] ) -from .tools import GenerateToolJsonSchema, ObjectJsonSchema, ToolDefinition +from .tools import GenerateToolJsonSchema, ObjectJsonSchema, TextFormat, ToolDefinition from .toolsets.abstract import AbstractToolset, ToolsetTool if TYPE_CHECKING: @@ -656,6 +656,7 @@ def __init__( name: str | None = None, description: str | None = None, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, ): if inspect.isfunction(output) or inspect.ismethod(output): self._function_schema = _function_schema.function_schema(output, GenerateToolJsonSchema) @@ -711,6 +712,7 @@ def __init__( description=description, json_schema=json_schema, strict=strict, + text_format=text_format, ) ) @@ -979,11 +981,13 @@ def build( name = None description = None strict = None + text_format = None if isinstance(output, ToolOutput): # do we need to error on conflicts here? 
(DavidM): If this is internal maybe doesn't matter, if public, use overloads name = output.name description = output.description strict = output.strict + text_format = output.text_format output = output.output @@ -991,7 +995,9 @@ def build( if strict is None: strict = default_strict - processor = ObjectOutputProcessor(output=output, description=description, strict=strict) + processor = ObjectOutputProcessor( + output=output, description=description, strict=strict, text_format=text_format + ) object_def = processor.object_def if name is None: @@ -1016,6 +1022,7 @@ def build( description=description, parameters_json_schema=object_def.json_schema, strict=object_def.strict, + text_format=object_def.text_format, outer_typed_dict_key=processor.outer_typed_dict_key, kind='output', ) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 05f8c4046f..2136130a24 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -8,7 +8,7 @@ from collections.abc import AsyncIterator, Awaitable, Callable, Iterator, Sequence from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager from contextvars import ContextVar -from typing import TYPE_CHECKING, Any, ClassVar, cast, overload +from typing import TYPE_CHECKING, Any, ClassVar, Literal, cast, overload from opentelemetry.trace import NoOpTracer, use_span from pydantic.json_schema import GenerateJsonSchema @@ -50,6 +50,7 @@ DocstringFormat, GenerateToolJsonSchema, RunContext, + TextFormat, Tool, ToolFuncContext, ToolFuncEither, @@ -1012,6 +1013,7 @@ def tool( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, @@ -1029,6 +1031,7 @@ def tool( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, @@ -1076,6 +1079,8 @@ async def spam(ctx: RunContext[str], y: float) -> float: schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using freeform function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. sequential: Whether the function requires a sequential/serial execution environment. Defaults to False. requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. 
@@ -1096,6 +1101,7 @@ def tool_decorator( require_parameter_descriptions=require_parameter_descriptions, schema_generator=schema_generator, strict=strict, + text_format=text_format, sequential=sequential, requires_approval=requires_approval, metadata=metadata, @@ -1119,6 +1125,7 @@ def tool_plain( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, @@ -1136,6 +1143,7 @@ def tool_plain( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, @@ -1183,6 +1191,8 @@ async def spam(ctx: RunContext[str]) -> float: schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using freeform function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. sequential: Whether the function requires a sequential/serial execution environment. Defaults to False. requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. @@ -1201,6 +1211,7 @@ def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams require_parameter_descriptions=require_parameter_descriptions, schema_generator=schema_generator, strict=strict, + text_format=text_format, sequential=sequential, requires_approval=requires_approval, metadata=metadata, diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py index 81c7491966..7939b24e3a 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openai.py +++ b/pydantic_ai_slim/pydantic_ai/models/openai.py @@ -48,7 +48,7 @@ from ..profiles.openai import OpenAIModelProfile, OpenAISystemPromptRole from ..providers import Provider, infer_provider from ..settings import ModelSettings -from ..tools import ToolDefinition +from ..tools import LarkTextFormat, RegexTextFormat, ToolDefinition from . 
import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests, download_item, get_user_agent try: @@ -81,6 +81,7 @@ from openai.types.responses.response_status import ResponseStatus from openai.types.shared import ReasoningEffort from openai.types.shared_params import Reasoning + from openai.types.shared_params.custom_tool_input_format import CustomToolInputFormat except ImportError as _import_error: raise ImportError( 'Please install `openai` to use the OpenAI model, ' @@ -1026,9 +1027,25 @@ def _process_response( # noqa: C901 elif isinstance(item, responses.ResponseComputerToolCall): # pragma: no cover # Pydantic AI doesn't yet support the ComputerUse built-in tool pass - elif isinstance(item, responses.ResponseCustomToolCall): # pragma: no cover - # Support is being implemented in https://github.com/pydantic/pydantic-ai/pull/2572 - pass + elif isinstance(item, responses.ResponseCustomToolCall): + # Handle custom tool calls (freeform function calling) + if item.name not in model_request_parameters.tool_defs: + argument_name = 'input' + else: + tool = model_request_parameters.tool_defs[item.name] + tool_argument_name = tool.single_string_argument_name + if tool_argument_name is None: + raise UnexpectedModelBehavior( + f'Custom tool call made to function {item.name} which has unexpected arguments' + ) + argument_name = tool_argument_name + items.append( + ToolCallPart( + item.name, + {argument_name: item.input}, + tool_call_id=_combine_tool_call_ids(item.call_id, item.id), + ) + ) elif isinstance(item, responses.response_output_item.LocalShellCall): # pragma: no cover # Pydantic AI doesn't yet support the `codex-mini-latest` LocalShell built-in tool pass @@ -1164,11 +1181,14 @@ async def _responses_create( try: extra_headers = model_settings.get('extra_headers', {}) extra_headers.setdefault('User-Agent', get_user_agent()) + parallel_tool_calls = self._get_parallel_tool_calling( + model_settings=model_settings, model_request_parameters=model_request_parameters + ) return await self.client.responses.create( input=openai_messages, model=self._model_name, instructions=instructions, - parallel_tool_calls=model_settings.get('parallel_tool_calls', NOT_GIVEN), + parallel_tool_calls=parallel_tool_calls, tools=tools or NOT_GIVEN, tool_choice=tool_choice or NOT_GIVEN, max_output_tokens=model_settings.get('max_tokens', NOT_GIVEN), @@ -1210,7 +1230,16 @@ def _get_reasoning(self, model_settings: OpenAIResponsesModelSettings) -> Reason return NOT_GIVEN return Reasoning(effort=reasoning_effort, summary=reasoning_summary) - def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.FunctionToolParam]: + def _get_parallel_tool_calling( + self, model_settings: OpenAIResponsesModelSettings, model_request_parameters: ModelRequestParameters + ) -> bool | NotGiven: + if any(tool_definition.text_format for tool_definition in model_request_parameters.tool_defs.values()): + return False + return model_settings.get('parallel_tool_calls', NOT_GIVEN) + + def _get_tools( + self, model_request_parameters: ModelRequestParameters + ) -> list[responses.FunctionToolParam | responses.CustomToolParam]: return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()] def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.ToolParam]: @@ -1253,15 +1282,44 @@ def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) - tools.append({'type': 'image_generation'}) return tools - def 
_map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam:
+    def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam | responses.CustomToolParam:
+        model_profile = OpenAIModelProfile.from_profile(self.profile)
+        if f.text_format:
+            if not model_profile.openai_supports_freeform_function_calling:
+                raise UserError(
+                    f'Tool {f.name!r} uses freeform function calling but {self._model_name!r} does not support freeform function calling.'
+                )
+            if not f.only_takes_string_argument:
+                raise UserError(
+                    f'Tool {f.name!r} is set as a freeform function but does not take a single string argument.'
+                )
+
+            # Map each known text format onto the Responses API custom tool input format
+            format: CustomToolInputFormat | None = None
+            if f.text_format == 'plain':
+                format = {'type': 'text'}
+            elif isinstance(f.text_format, RegexTextFormat):
+                format = {'type': 'grammar', 'syntax': 'regex', 'definition': f.text_format.pattern}
+            elif isinstance(f.text_format, LarkTextFormat):
+                format = {'type': 'grammar', 'syntax': 'lark', 'definition': f.text_format.definition}
+
+            # If the format was recognized, return a custom tool param;
+            # otherwise fall through and return a normal function tool (unknown text format type).
+            if format is not None:
+                tool_param: responses.CustomToolParam = {
+                    'name': f.name,
+                    'type': 'custom',
+                    'description': f.description or '',
+                    'format': format,
+                }
+                return tool_param
+
         return {
             'name': f.name,
             'parameters': f.parameters_json_schema,
             'type': 'function',
             'description': f.description,
-            'strict': bool(
-                f.strict and OpenAIModelProfile.from_profile(self.profile).openai_supports_strict_tool_definition
-            ),
+            'strict': bool(f.strict and model_profile.openai_supports_strict_tool_definition),
         }
 
     def _get_previous_response_id_and_new_messages(
diff --git a/pydantic_ai_slim/pydantic_ai/output.py b/pydantic_ai_slim/pydantic_ai/output.py
index 27d7f84aea..2b2f80d7f5 100644
--- a/pydantic_ai_slim/pydantic_ai/output.py
+++ b/pydantic_ai_slim/pydantic_ai/output.py
@@ -12,7 +12,7 @@
 from .
import _utils, exceptions from ._json_schema import InlineDefsJsonSchemaTransformer from .messages import ToolCallPart -from .tools import DeferredToolRequests, ObjectJsonSchema, RunContext, ToolDefinition +from .tools import DeferredToolRequests, ObjectJsonSchema, RunContext, TextFormat, ToolDefinition __all__ = ( # classes @@ -114,6 +114,8 @@ class Vehicle(BaseModel): """The maximum number of retries for the tool.""" strict: bool | None """Whether to use strict mode for the tool.""" + text_format: Literal['plain'] | TextFormat | None = None + """Whether to invoke the function with freeform function calling for tool calls.""" def __init__( self, @@ -123,12 +125,14 @@ def __init__( description: str | None = None, max_retries: int | None = None, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, ): self.output = type_ self.name = name self.description = description self.max_retries = max_retries self.strict = strict + self.text_format = text_format @dataclass(init=False) @@ -255,6 +259,7 @@ class OutputObjectDefinition: name: str | None = None description: str | None = None strict: bool | None = None + text_format: Literal['plain'] | TextFormat | None = None @dataclass diff --git a/pydantic_ai_slim/pydantic_ai/profiles/openai.py b/pydantic_ai_slim/pydantic_ai/profiles/openai.py index 553fe245c8..f7157acf52 100644 --- a/pydantic_ai_slim/pydantic_ai/profiles/openai.py +++ b/pydantic_ai_slim/pydantic_ai/profiles/openai.py @@ -38,6 +38,11 @@ class OpenAIModelProfile(ModelProfile): openai_system_prompt_role: OpenAISystemPromptRole | None = None """The role to use for the system prompt message. If not provided, defaults to `'system'`.""" + # GPT-5 introduced support for directly calling a function with a string. + openai_supports_freeform_function_calling: bool = False + """Whether the provider accepts the value ``type='custom'`` for tools in the + request payload.""" + openai_chat_supports_web_search: bool = False """Whether the model supports web search in Chat Completions API.""" @@ -56,6 +61,7 @@ def __post_init__(self): # pragma: no cover def openai_model_profile(model_name: str) -> ModelProfile: """Get the model profile for an OpenAI model.""" is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5') + is_freeform_function_calling_model = model_name.startswith('gpt-5') # Check if the model supports web search (only specific search-preview models) supports_web_search = '-search-preview' in model_name @@ -85,6 +91,7 @@ def openai_model_profile(model_name: str) -> ModelProfile: supports_json_schema_output=True, supports_json_object_output=True, supports_image_output=is_reasoning_model or '4.1' in model_name or '4o' in model_name, + openai_supports_freeform_function_calling=is_freeform_function_calling_model, openai_unsupported_model_settings=openai_unsupported_model_settings, openai_system_prompt_role=openai_system_prompt_role, openai_chat_supports_web_search=supports_web_search, diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index 844e99a25e..976b87be11 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -1,8 +1,10 @@ from __future__ import annotations as _annotations +import re from collections.abc import Awaitable, Callable, Sequence from dataclasses import KW_ONLY, dataclass, field, replace from typing import Annotated, Any, Concatenate, Generic, Literal, TypeAlias, cast +from warnings import warn from pydantic import Discriminator, Tag from 
pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue @@ -32,6 +34,9 @@ 'DeferredToolResults', 'ToolApproved', 'ToolDenied', + 'TextFormat', + 'RegexTextFormat', + 'LarkTextFormat', ) @@ -216,6 +221,99 @@ class DeferredToolResults: A = TypeVar('A') +@dataclass +class RegexTextFormat: + """Text format using regular expression pattern matching. + + The function must take a single string argument that will be validated + against the provided regex pattern by the model. + + Calling a function in this way prevents parallel tool calling. + + Example: + ```python + from pydantic_ai import Agent, RegexTextFormat + + agent = Agent('openai:gpt-5') + + @agent.tool_plain(text_format=RegexTextFormat(r'\\d{3}-\\d{4}')) + def parse_phone(phone: str) -> str: + return f'Parsed phone: {phone}' + ``` + + Note: this is currently only supported by OpenAI GPT-5 models. + """ + + pattern: str + """The regular expression pattern that the text must conform to.""" + + def __post_init__(self) -> None: + try: + re.compile(self.pattern) + except re.error as e: + raise ValueError('Regex pattern is invalid') from e + + +@dataclass +class LarkTextFormat: + """Text format using Lark parser grammar. + + The function must take a single string argument that will be validated + against the provided Lark grammar by the model. + + Requires the `lark` package to be installed for validation during tool definition. + + Calling a function in this way prevents parallel tool calling. + + Example: + ```python + from pydantic_ai import Agent, LarkTextFormat + + agent = Agent('openai:gpt-5') + + grammar = ''' + start: "hello" name + name: /[A-Za-z]+/ + ''' + + @agent.tool_plain(text_format=LarkTextFormat(grammar)) + def greet(text: str) -> str: + return f'Greeting: {text}' + ``` + + Note: this is currently only supported by OpenAI GPT-5 models. + """ + + definition: str + """The Lark grammar definition that the text must conform to.""" + + def __post_init__(self) -> None: + try: + import lark + from lark.exceptions import GrammarError + + try: + lark.Lark(self.definition) + except GrammarError as e: + raise ValueError('Lark grammar is invalid') from e + except ImportError: + warn( + 'Cannot validate lark grammar as the lark optional dependency group has not been installed', + stacklevel=2, + ) # pragma: no cover + + +TextFormat: TypeAlias = RegexTextFormat | LarkTextFormat +"""Union of all supported text format types for freeform function calling. + +Text formats allow constraining the plain text passed to tools instead of using JSON. +The function must take a single string argument and prevents parallel tool calling. + +Note: Support varies by model. Currently only OpenAI GPT-5 models support this feature. +Unsupported formats will be silently ignored by models that don't support them. 
+""" + + class GenerateToolJsonSchema(GenerateJsonSchema): def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue: json_schema = super().typed_dict_schema(schema) @@ -253,6 +351,7 @@ class Tool(Generic[AgentDepsT]): docstring_format: DocstringFormat require_parameter_descriptions: bool strict: bool | None + text_format: Literal['plain'] | TextFormat | None sequential: bool requires_approval: bool metadata: dict[str, Any] | None @@ -276,6 +375,7 @@ def __init__( require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, @@ -331,6 +431,8 @@ async def prep_my_tool( schema_generator: The JSON schema generator class to use. Defaults to `GenerateToolJsonSchema`. strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. + text_format: Used to invoke the function using freeform function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. sequential: Whether the function requires a sequential/serial execution environment. Defaults to False. requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. @@ -353,6 +455,7 @@ async def prep_my_tool( self.docstring_format = docstring_format self.require_parameter_descriptions = require_parameter_descriptions self.strict = strict + self.text_format = text_format self.sequential = sequential self.requires_approval = requires_approval self.metadata = metadata @@ -409,6 +512,7 @@ def tool_def(self): description=self.description, parameters_json_schema=self.function_schema.json_schema, strict=self.strict, + text_format=self.text_format, sequential=self.sequential, metadata=self.metadata, ) @@ -479,6 +583,18 @@ class ToolDefinition: Note: this is currently only supported by OpenAI models. """ + text_format: Literal['plain'] | TextFormat | None = None + """Whether to invoke the function with freeform function calling for tool calls. + + Setting this to a format while using a supported model prevents parallel tool calling + in exchange for passing raw text payloads to your custom tool without wrapping the data in JSON. + The function must take a single string argument. + + When `None` (the default), the model invokes the tool in the normal way and parallel tool calls are possible. + + Note: this is currently only supported by OpenAI GPT-5 models. + """ + sequential: bool = False """Whether this tool requires a sequential/serial execution environment.""" @@ -499,6 +615,28 @@ class ToolDefinition: For MCP tools, this contains the `meta`, `annotations`, and `output_schema` fields from the tool definition. 
""" + @property + def only_takes_string_argument(self) -> bool: + # true if the parameters_json_schema looks like: + # {"additionalProperties": False, "properties": {NAME: {"type": "string"}}, "required": ["NAME"], "type": "object"} + return self.single_string_argument_name is not None + + @property + def single_string_argument_name(self) -> str | None: + # returns the name of the single argument that is a string + # used for freeform function calling + # will return None if there is more or less than one argument, + # or if the argument is not a string + schema = self.parameters_json_schema + if len(schema['required']) != 1: + return None + if len(schema['properties']) != 1: + return None + property_name: str = schema['required'][0] + if not schema['properties'][property_name].get('type', None) == 'string': + return None + return property_name + @property def defer(self) -> bool: """Whether calls to this tool will be deferred. diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/function.py b/pydantic_ai_slim/pydantic_ai/toolsets/function.py index 014dd7d8a6..44fef20cf1 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/function.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/function.py @@ -2,7 +2,7 @@ from collections.abc import Awaitable, Callable, Sequence from dataclasses import dataclass, replace -from typing import Any, overload +from typing import Any, Literal, overload from pydantic.json_schema import GenerateJsonSchema @@ -11,6 +11,7 @@ from ..tools import ( DocstringFormat, GenerateToolJsonSchema, + TextFormat, Tool, ToolFuncEither, ToolParams, @@ -115,6 +116,7 @@ def tool( require_parameter_descriptions: bool | None = None, schema_generator: type[GenerateJsonSchema] | None = None, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool | None = None, requires_approval: bool | None = None, metadata: dict[str, Any] | None = None, @@ -132,6 +134,7 @@ def tool( require_parameter_descriptions: bool | None = None, schema_generator: type[GenerateJsonSchema] | None = None, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool | None = None, requires_approval: bool | None = None, metadata: dict[str, Any] | None = None, @@ -183,6 +186,8 @@ async def spam(ctx: RunContext[str], y: float) -> float: strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. If `None`, the default value is determined by the toolset. + text_format: Used to invoke the function using freeform function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. sequential: Whether the function requires a sequential/serial execution environment. Defaults to False. If `None`, the default value is determined by the toolset. requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. 
@@ -206,6 +211,7 @@ def tool_decorator( require_parameter_descriptions, schema_generator, strict, + text_format, sequential, requires_approval, metadata, @@ -225,6 +231,7 @@ def add_function( require_parameter_descriptions: bool | None = None, schema_generator: type[GenerateJsonSchema] | None = None, strict: bool | None = None, + text_format: Literal['plain'] | TextFormat | None = None, sequential: bool | None = None, requires_approval: bool | None = None, metadata: dict[str, Any] | None = None, @@ -254,6 +261,8 @@ def add_function( strict: Whether to enforce JSON schema compliance (only affects OpenAI). See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. If `None`, the default value is determined by the toolset. + text_format: Used to invoke the function using freeform function calling (only affects OpenAI). + See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info. sequential: Whether the function requires a sequential/serial execution environment. Defaults to False. If `None`, the default value is determined by the toolset. requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. @@ -285,6 +294,7 @@ def add_function( require_parameter_descriptions=require_parameter_descriptions, schema_generator=schema_generator, strict=strict, + text_format=text_format, sequential=sequential, requires_approval=requires_approval, metadata=metadata, diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index f73ffcdab4..0e2b412782 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -65,7 +65,7 @@ dependencies = [ [tool.hatch.metadata.hooks.uv-dynamic-versioning.optional-dependencies] # WARNING if you add optional groups, please update docs/install.md -logfire = ["logfire[httpx]>=3.14.1"] +logfire = ["logfire[httpx]>=3.16.1"] # Models openai = ["openai>=1.107.2"] cohere = ["cohere>=5.18.0; platform_system != 'Emscripten'"] @@ -100,6 +100,8 @@ retries = ["tenacity>=8.2.3"] temporal = ["temporalio==1.18.0"] # DBOS dbos = ["dbos>=1.14.0"] +# freeform function calling with lark context free grammar +lark = ["lark>=1.2.2"] [tool.hatch.metadata] allow-direct-references = true diff --git a/pyproject.toml b/pyproject.toml index c4f36b681d..987449801e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ requires-python = ">=3.10" [tool.hatch.metadata.hooks.uv-dynamic-versioning] dependencies = [ - "pydantic-ai-slim[openai,vertexai,google,groq,anthropic,mistral,cohere,bedrock,huggingface,cli,mcp,evals,ag-ui,retries,temporal,logfire]=={{ version }}", + "pydantic-ai-slim[openai,vertexai,google,groq,anthropic,mistral,cohere,bedrock,huggingface,cli,mcp,evals,ag-ui,retries,temporal,logfire,lark]=={{ version }}", ] [tool.hatch.metadata.hooks.uv-dynamic-versioning.optional-dependencies] diff --git a/tests/models/test_google.py b/tests/models/test_google.py index ce461e2810..77e187dc30 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -2600,6 +2600,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='8a7952', identifier='8a7952', ) ) @@ -2620,6 +2621,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='8a7952', identifier='8a7952', ) ), @@ -2644,6 +2646,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid 
BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='7d173c', identifier='7d173c', ) ) @@ -2664,6 +2667,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='7d173c', identifier='7d173c', ) ), @@ -2693,6 +2697,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='9ff9cc', identifier='9ff9cc', ) ) @@ -2710,6 +2715,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='2af2a7', identifier='2af2a7', ) ) @@ -2730,6 +2736,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='2af2a7', identifier='2af2a7', ) ), @@ -2758,6 +2765,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='2af2a7', identifier='2af2a7', ) ), @@ -2796,6 +2804,7 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='00f2af', identifier=IsStr(), ) ), @@ -2831,6 +2840,7 @@ async def test_google_image_or_text_output(allow_model_requests: None, google_pr BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='f82faf', identifier='f82faf', ) ) @@ -2849,6 +2859,7 @@ async def test_google_image_and_text_output(allow_model_requests: None, google_p BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='67b12f', identifier='67b12f', ) ] diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 98dd811051..dc25d181d6 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -41,7 +41,7 @@ from pydantic_ai.profiles.openai import OpenAIModelProfile, openai_model_profile from pydantic_ai.result import RunUsage from pydantic_ai.settings import ModelSettings -from pydantic_ai.tools import ToolDefinition +from pydantic_ai.tools import LarkTextFormat, RegexTextFormat, ToolDefinition from pydantic_ai.usage import RequestUsage from ..conftest import IsDatetime, IsNow, IsStr, TestEnv, try_import @@ -2277,9 +2277,9 @@ def test_model_profile_strict_not_supported(): ) m = OpenAIChatModel('gpt-4o', provider=OpenAIProvider(api_key='foobar')) - tool_param = m._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + tool_definition = m._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] - assert tool_param == snapshot( + assert tool_definition == snapshot( { 'type': 'function', 'function': { @@ -2297,9 +2297,9 @@ def test_model_profile_strict_not_supported(): provider=OpenAIProvider(api_key='foobar'), profile=OpenAIModelProfile(openai_supports_strict_tool_definition=False).update(openai_model_profile('gpt-4o')), ) - tool_param = m._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + tool_definition = m._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] - assert tool_param == snapshot( + assert tool_definition == snapshot( { 'type': 'function', 'function': { @@ -3020,3 +3020,289 @@ def test_deprecated_openai_model(openai_api_key: str): provider = OpenAIProvider(api_key=openai_api_key) OpenAIModel('gpt-4o', provider=provider) # type: ignore[reportDeprecated] + + +@pytest.mark.parametrize('model_name', ['gpt-5', 
'gpt-5-mini', 'gpt-5-nano']) +def test_model_profile_gpt5_freeform_function_calling_support(model_name: str): + profile = cast('OpenAIModelProfile', openai_model_profile(model_name)) + assert profile.openai_supports_freeform_function_calling + + +@pytest.mark.parametrize('model_name', ['gpt-4.1', 'gpt-4o', 'gpt-o4-mini']) +def test_model_profile_gpt4_freeform_function_calling_support(model_name: str): + gpt4_profile = cast('OpenAIModelProfile', openai_model_profile(model_name)) + assert not gpt4_profile.openai_supports_freeform_function_calling + + +def test_chat_model_ignores_text_mode_text_when_tool_mapping(): + my_tool = ToolDefinition( + name='analyze_text', + description='Analyze the provided text', + parameters_json_schema={ + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + 'additionalProperties': False, + }, + text_format='plain', + ) + + model = OpenAIChatModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'type': 'function', + 'function': { + 'name': 'analyze_text', + 'description': 'Analyze the provided text', + 'parameters': { + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + 'additionalProperties': False, + }, + }, + } + ) + + +def test_chat_model_ignores_text_mode_lark_when_tool_mapping(): + my_tool = ToolDefinition( + name='parse_data', + description='Parse structured data', + parameters_json_schema={ + 'type': 'object', + 'properties': {'data': {'type': 'string'}}, + 'required': ['data'], + 'additionalProperties': False, + }, + text_format=LarkTextFormat(definition='start: "hello" " " "world"'), + ) + + model = OpenAIChatModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'type': 'function', + 'function': { + 'name': 'parse_data', + 'description': 'Parse structured data', + 'parameters': { + 'type': 'object', + 'properties': {'data': {'type': 'string'}}, + 'required': ['data'], + 'additionalProperties': False, + }, + }, + } + ) + + +def test_chat_model_ignores_text_mode_regex_when_tool_mapping(): + my_tool = ToolDefinition( + name='extract_pattern', + description='Extract data matching pattern', + parameters_json_schema={ + 'type': 'object', + 'properties': {'text': {'type': 'string'}}, + 'required': ['text'], + 'additionalProperties': False, + }, + text_format=RegexTextFormat(pattern=r'\d{4}-\d{2}-\d{2}'), + ) + + model = OpenAIChatModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'type': 'function', + 'function': { + 'name': 'extract_pattern', + 'description': 'Extract data matching pattern', + 'parameters': { + 'type': 'object', + 'properties': {'text': {'type': 'string'}}, + 'required': ['text'], + 'additionalProperties': False, + }, + }, + } + ) + + +def test_responses_model_uses_text_mode_text_when_tool_mapping(): + my_tool = ToolDefinition( + name='analyze_text', + description='Analyze the provided text', + parameters_json_schema={ + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + 'additionalProperties': False, + }, + text_format='plain', + ) + + model = OpenAIResponsesModel('gpt-5', 
provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'name': 'analyze_text', + 'type': 'custom', + 'description': 'Analyze the provided text', + 'format': {'type': 'text'}, + } + ) + + +def test_responses_model_uses_text_mode_lark_when_tool_mapping(): + my_tool = ToolDefinition( + name='parse_data', + description='Parse structured data', + parameters_json_schema={ + 'type': 'object', + 'properties': {'data': {'type': 'string'}}, + 'required': ['data'], + 'additionalProperties': False, + }, + text_format=LarkTextFormat(definition='start: "hello" " " "world"'), + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'name': 'parse_data', + 'type': 'custom', + 'description': 'Parse structured data', + 'format': {'type': 'grammar', 'syntax': 'lark', 'definition': 'start: "hello" " " "world"'}, + } + ) + + +def test_responses_model_uses_text_mode_regex_when_tool_mapping(): + my_tool = ToolDefinition( + name='extract_pattern', + description='Extract data matching pattern', + parameters_json_schema={ + 'type': 'object', + 'properties': {'text': {'type': 'string'}}, + 'required': ['text'], + 'additionalProperties': False, + }, + text_format=RegexTextFormat(pattern=r'\d{4}-\d{2}-\d{2}'), + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'name': 'extract_pattern', + 'type': 'custom', + 'description': 'Extract data matching pattern', + 'format': {'type': 'grammar', 'syntax': 'regex', 'definition': '\\d{4}-\\d{2}-\\d{2}'}, + } + ) + + +def test_chat_model_tool_mapping_regular_function_unchanged(): + my_tool = ToolDefinition( + name='regular_tool', + description='A regular tool', + parameters_json_schema={ + 'type': 'object', + 'properties': {'param': {'type': 'string'}}, + 'required': ['param'], + }, + ) + + model = OpenAIChatModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'type': 'function', + 'function': { + 'name': 'regular_tool', + 'description': 'A regular tool', + 'parameters': {'type': 'object', 'properties': {'param': {'type': 'string'}}, 'required': ['param']}, + }, + } + ) + + +def test_responses_model_tool_mapping_regular_function_unchanged(): + my_tool = ToolDefinition( + name='regular_tool', + description='A regular tool', + parameters_json_schema={ + 'type': 'object', + 'properties': {'param': {'type': 'string'}}, + 'required': ['param'], + }, + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + tool_definition = model._map_tool_definition(my_tool) # type: ignore[reportPrivateUsage] + + assert tool_definition == snapshot( + { + 'name': 'regular_tool', + 'parameters': {'type': 'object', 'properties': {'param': {'type': 'string'}}, 'required': ['param']}, + 'type': 'function', + 'description': 'A regular tool', + 'strict': False, + } + ) + + +def test_tool_definition_single_string_argument(): + valid_tool = ToolDefinition( + name='valid_tool', + description='Valid single string tool', + parameters_json_schema={ + 'type': 'object', + 
'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + 'additionalProperties': False, + }, + ) + assert valid_tool.only_takes_string_argument + assert valid_tool.single_string_argument_name == 'content' + + +def test_tool_definition_multiple_argument_single_string_argument(): + multi_param_tool = ToolDefinition( + name='multi_tool', + description='Multi param tool', + parameters_json_schema={ + 'type': 'object', + 'param1': {'type': 'string'}, + 'properties': { + 'param2': {'type': 'string'}, + }, + 'required': ['param1', 'param2'], + }, + ) + assert not multi_param_tool.only_takes_string_argument + assert multi_param_tool.single_string_argument_name is None + + +def test_tool_definition_single_non_string_argument_single_string_argument(): + non_string_tool = ToolDefinition( + name='non_string_tool', + description='Non-string param tool', + parameters_json_schema={ + 'type': 'object', + 'properties': {'count': {'type': 'integer'}}, + 'required': ['count'], + }, + ) + assert not non_string_tool.only_takes_string_argument + assert non_string_tool.single_string_argument_name is None diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index 9bc4e9180e..15c2e04d15 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -1,6 +1,7 @@ import json import re from dataclasses import replace +from datetime import datetime, timezone from typing import Any, cast import pytest @@ -36,11 +37,12 @@ ) from pydantic_ai.agent import Agent from pydantic_ai.builtin_tools import CodeExecutionTool, WebSearchTool -from pydantic_ai.exceptions import ModelHTTPError, ModelRetry +from pydantic_ai.exceptions import ModelHTTPError, ModelRetry, UserError from pydantic_ai.messages import ( BuiltinToolCallEvent, # pyright: ignore[reportDeprecated] BuiltinToolResultEvent, # pyright: ignore[reportDeprecated] ) +from pydantic_ai.models import ModelRequestParameters from pydantic_ai.output import NativeOutput, PromptedOutput, TextOutput, ToolOutput from pydantic_ai.profiles.openai import openai_model_profile from pydantic_ai.tools import ToolDefinition @@ -50,6 +52,8 @@ from .mock_openai import MockOpenAIResponses, get_mock_responses_kwargs, response_message with try_import() as imports_successful: + from openai import NOT_GIVEN + from openai.types.responses.response_output_item import ResponseCustomToolCall from openai.types.responses.response_output_message import Content, ResponseOutputMessage, ResponseOutputText from openai.types.responses.response_reasoning_item import ResponseReasoningItem, Summary from openai.types.responses.response_usage import ResponseUsage @@ -1908,6 +1912,223 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request ) +def test_openai_responses_model_parallel_tool_calling_enabled(): + regular_tool = ToolDefinition( + name='regular_function', + description='A regular function', + parameters_json_schema={ + 'type': 'object', + 'properties': {'param': {'type': 'string'}}, + 'required': ['param'], + }, + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + + params_regular_only = ModelRequestParameters(function_tools=[regular_tool]) + parallel_calling = model._get_parallel_tool_calling({}, params_regular_only) # type: ignore[reportPrivateUsage] + assert parallel_calling == NOT_GIVEN + + +def test_openai_responses_model_parallel_tool_calling_disabled_with_freeform(): + freeform_tool = ToolDefinition( + name='freeform_analyzer', + 
description='A freeform analyzer', + parameters_json_schema={ + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + }, + text_format='plain', + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + + params_with_freeform = ModelRequestParameters(function_tools=[freeform_tool]) + parallel_calling = model._get_parallel_tool_calling({}, params_with_freeform) # type: ignore[reportPrivateUsage] + assert not parallel_calling + + +def test_openai_responses_model_parallel_tool_calling_disabled_with_freeform_output(): + freeform_tool = ToolDefinition( + name='freeform_analyzer', + description='A freeform analyzer', + parameters_json_schema={ + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + }, + text_format='plain', + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + + params_with_freeform = ModelRequestParameters(output_tools=[freeform_tool]) + parallel_calling = model._get_parallel_tool_calling({}, params_with_freeform) # type: ignore[reportPrivateUsage] + assert not parallel_calling + + +def test_openai_responses_model_freeform_function_unsupported_model_error(): + freeform_tool = ToolDefinition( + name='freeform_analyzer', + description='A freeform analyzer', + parameters_json_schema={ + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + }, + text_format='plain', + ) + + # GPT-4 doesn't support freeform function calling + model = OpenAIResponsesModel('gpt-4o', provider=OpenAIProvider(api_key='foobar')) + + with pytest.raises( + UserError, match=r'uses freeform function calling but .* does not support freeform function calling' + ): + model._map_tool_definition(freeform_tool) # type: ignore[reportPrivateUsage] + + +def test_openai_responses_model_freeform_function_invalid_signature_error(): + multi_param_tool = ToolDefinition( + name='multi_param_analyzer', + description='Tool with multiple params', + parameters_json_schema={ + 'type': 'object', + 'properties': { + 'param1': {'type': 'string'}, + 'param2': {'type': 'string'}, + }, + 'required': ['param1', 'param2'], + }, + text_format='plain', + ) + + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key='foobar')) + + with pytest.raises(UserError, match=r'is set as a freeform function but does not take a single string argument'): + model._map_tool_definition(multi_param_tool) # type: ignore[reportPrivateUsage] + + +async def test_openai_responses_model_custom_tool_call_response_processing(allow_model_requests: None): + """Test that OpenAI Responses model processes custom_tool_call responses correctly.""" + from pydantic_ai.models import ModelRequestParameters + + content_data = [ + ResponseCustomToolCall( + type='custom_tool_call', + name='analyze_content', + call_id='call_custom_456', + input='This is the raw content input', + ) + ] + + mock_response = response_message(content_data) + mock_client = MockOpenAIResponses.create_mock(mock_response) + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(openai_client=mock_client)) + + freeform_tool = ToolDefinition( + name='analyze_content', + description='Analyze content', + parameters_json_schema={ + 'type': 'object', + 'properties': {'content': {'type': 'string'}}, + 'required': ['content'], + }, + text_format='plain', + ) + + params = ModelRequestParameters(function_tools=[freeform_tool]) + + response = model._process_response(mock_response, params) # type: 
ignore[reportPrivateUsage] + + assert response == snapshot( + ModelResponse( + parts=[ + ToolCallPart( + tool_name='analyze_content', + args={'content': 'This is the raw content input'}, + tool_call_id='call_custom_456', + ) + ], + model_name='gpt-4o-123', + timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + provider_name='openai', + provider_response_id='123', + ) + ) + + +async def test_openai_responses_model_custom_tool_call_unknown_tool_parsed(allow_model_requests: None): + """Test that unknown custom tool calls are parsed into ToolCallPart with 'input' as argument name. + + Unknown tools are handled during execution (not response processing) per the architecture pattern. + """ + from pydantic_ai.models import ModelRequestParameters + + content_data = [ + ResponseCustomToolCall( + type='custom_tool_call', + name='unknown_analyzer', + call_id='call_unknown_456', + input='Some content', + ) + ] + + mock_response = response_message(content_data) + mock_client = MockOpenAIResponses.create_mock(mock_response) + m = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(openai_client=mock_client)) + + params = ModelRequestParameters() # No tools defined + + # Should not raise an error - unknown tools are handled during execution + response = m._process_response(mock_response, params) # type: ignore[reportPrivateUsage] + + # Verify that a ToolCallPart was created with 'input' as the default argument name + assert len(response.parts) == 1 + tool_call = response.parts[0] + assert isinstance(tool_call, ToolCallPart) + assert tool_call.tool_name == 'unknown_analyzer' + assert tool_call.args == {'input': 'Some content'} + assert tool_call.tool_call_id == 'call_unknown_456' + + +async def test_openai_responses_model_custom_tool_call_invalid_signature_error(allow_model_requests: None): + """Test that OpenAI Responses model raises error for custom tool calls to tools with invalid signatures.""" + from pydantic_ai.models import ModelRequestParameters + + content_data = [ + ResponseCustomToolCall( + type='custom_tool_call', + name='invalid_analyzer', + call_id='call_invalid_456', + input='Some content', + ) + ] + + mock_response = response_message(content_data) + mock_client = MockOpenAIResponses.create_mock(mock_response) + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(openai_client=mock_client)) + + invalid_tool = ToolDefinition( + name='invalid_analyzer', + description='Tool with invalid signature', + parameters_json_schema={ + 'type': 'object', + 'properties': { + 'param1': {'type': 'string'}, + 'param2': {'type': 'string'}, + }, + 'required': ['param1', 'param2'], + }, + ) + + params = ModelRequestParameters(function_tools=[invalid_tool]) + + with pytest.raises(UnexpectedModelBehavior, match='has unexpected arguments'): + model._process_response(mock_response, params) # type: ignore[reportPrivateUsage] + + async def test_openai_responses_model_thinking_part(allow_model_requests: None, openai_api_key: str): m = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) settings = OpenAIResponsesModelSettings(openai_reasoning_effort='high', openai_reasoning_summary='detailed') diff --git a/tests/test_agent.py b/tests/test_agent.py index c8beb08312..8948357caa 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -3616,6 +3616,7 @@ def get_image() -> BinaryContent: BinaryContent( 
+        model._process_response(mock_response, params)  # type: ignore[reportPrivateUsage]
+
+
 async def test_openai_responses_model_thinking_part(allow_model_requests: None, openai_api_key: str):
     m = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key))
     settings = OpenAIResponsesModelSettings(openai_reasoning_effort='high', openai_reasoning_summary='detailed')
diff --git a/tests/test_agent.py b/tests/test_agent.py
index c8beb08312..8948357caa 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -3616,6 +3616,7 @@ def get_image() -> BinaryContent:
                     BinaryContent(
                         data=b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDATx\x9cc```\x00\x00\x00\x04\x00\x01\xf6\x178\x00\x00\x00\x00IEND\xaeB`\x82',
                         media_type='image/png',
+                        _identifier='image_id_1',
                         identifier='image_id_1',
                     ),
@@ -3660,13 +3661,15 @@ def get_files():
                 UserPromptPart(
                     content=[
                         'This is file img_001:',
-                        ImageUrl(url='https://example.com/image.jpg', identifier='img_001'),
+                        ImageUrl(url='https://example.com/image.jpg', _identifier='img_001', identifier='img_001'),
                         'This is file vid_002:',
-                        VideoUrl(url='https://example.com/video.mp4', identifier='vid_002'),
+                        VideoUrl(url='https://example.com/video.mp4', _identifier='vid_002', identifier='vid_002'),
                         'This is file aud_003:',
-                        AudioUrl(url='https://example.com/audio.mp3', identifier='aud_003'),
+                        AudioUrl(url='https://example.com/audio.mp3', _identifier='aud_003', identifier='aud_003'),
                         'This is file doc_004:',
-                        DocumentUrl(url='https://example.com/document.pdf', identifier='doc_004'),
+                        DocumentUrl(
+                            url='https://example.com/document.pdf', _identifier='doc_004', identifier='doc_004'
+                        ),
                     ],
                     timestamp=IsNow(tz=timezone.utc),
                 ),
diff --git a/tests/test_logfire.py b/tests/test_logfire.py
index f434cdbaeb..866c49e2e7 100644
--- a/tests/test_logfire.py
+++ b/tests/test_logfire.py
@@ -459,6 +459,7 @@ async def my_ret(x: int) -> str:
                     },
                     'outer_typed_dict_key': None,
                     'strict': None,
+                    'text_format': None,
                     'sequential': False,
                     'kind': 'function',
                     'metadata': None,
@@ -905,6 +906,7 @@ class MyOutput:
                     'description': 'The final response which ends this conversation',
                     'outer_typed_dict_key': None,
                     'strict': None,
+                    'text_format': None,
                     'sequential': False,
                     'kind': 'output',
                     'metadata': None,
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 9eb8b76fe6..fdbc576831 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -1,3 +1,4 @@
+import importlib.util
 import json
 import re
 from collections.abc import Callable
@@ -35,7 +36,15 @@
 from pydantic_ai.models.function import AgentInfo, FunctionModel
 from pydantic_ai.models.test import TestModel
 from pydantic_ai.output import ToolOutput
-from pydantic_ai.tools import DeferredToolRequests, DeferredToolResults, ToolApproved, ToolDefinition, ToolDenied
+from pydantic_ai.tools import (
+    DeferredToolRequests,
+    DeferredToolResults,
+    LarkTextFormat,
+    RegexTextFormat,
+    ToolApproved,
+    ToolDefinition,
+    ToolDenied,
+)
 from pydantic_ai.usage import RequestUsage
 
 from .conftest import IsDatetime, IsStr
@@ -147,6 +156,7 @@ def test_docstring_google(docstring_format: Literal['google', 'auto']):
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -181,6 +191,7 @@ def test_docstring_sphinx(docstring_format: Literal['sphinx', 'auto']):
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -223,6 +234,7 @@ def test_docstring_numpy(docstring_format: Literal['numpy', 'auto']):
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -265,6 +277,7 @@ def my_tool(x: int) -> str:  # pragma: no cover
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -305,6 +318,7 @@ def my_tool(x: int) -> str:  # pragma: no cover
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -351,6 +365,7 @@ def my_tool(x: int) -> str:  # pragma: no cover
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -385,6 +400,7 @@ def test_only_returns_type():
            'parameters_json_schema': {'additionalProperties': False, 'properties': {}, 'type': 'object'},
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -410,6 +426,7 @@ def test_docstring_unknown():
            'parameters_json_schema': {'additionalProperties': {'type': 'integer'}, 'properties': {}, 'type': 'object'},
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -453,6 +470,7 @@ def test_docstring_google_no_body(docstring_format: Literal['google', 'auto']):
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -489,6 +507,7 @@ def takes_just_model(model: Foo) -> str:
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -534,6 +553,7 @@ def takes_just_model(model: Foo, z: int) -> str:
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -899,6 +919,7 @@ def test_suppress_griffe_logging(caplog: LogCaptureFixture):
            'outer_typed_dict_key': None,
            'parameters_json_schema': {'additionalProperties': False, 'properties': {}, 'type': 'object'},
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -971,6 +992,7 @@ def my_tool_plain(*, a: int = 1, b: int) -> int:
                'type': 'object',
            },
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -986,6 +1008,7 @@ def my_tool_plain(*, a: int = 1, b: int) -> int:
                'type': 'object',
            },
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -1074,6 +1097,7 @@ def my_tool(x: Annotated[str | None, WithJsonSchema({'type': 'string'})] = None,
                'type': 'object',
            },
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -1087,6 +1111,7 @@ def my_tool(x: Annotated[str | None, WithJsonSchema({'type': 'string'})] = None,
                'type': 'object',
            },
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -1124,6 +1149,7 @@ def get_score(data: Data) -> int: ...  # pragma: no branch
            },
            'outer_typed_dict_key': None,
            'strict': None,
+            'text_format': None,
            'kind': 'function',
            'sequential': False,
            'metadata': None,
@@ -1828,6 +1854,123 @@ def buy(fruit: str):
    )
+
+def test_function_text_format_regex_valid():
+    format = RegexTextFormat(pattern=r'\d+')
+    assert format.pattern == r'\d+'
+
+
+def test_function_text_format_regex_invalid():
+    with pytest.raises(ValueError, match='Regex pattern is invalid'):
+        RegexTextFormat(pattern='[')
+
+
+@pytest.mark.skipif(not importlib.util.find_spec('lark'), reason='lark not installed')
+def test_function_text_format_lark_valid():
+    format = LarkTextFormat(definition='start: "hello"')
+    assert format.definition == 'start: "hello"'
+
+
+@pytest.mark.skipif(not importlib.util.find_spec('lark'), reason='lark not installed')
+def test_function_text_format_lark_invalid():
+    with pytest.raises(ValueError, match='Lark grammar is invalid'):
+        LarkTextFormat(definition='invalid grammar [')
+
+
+def test_tool_definition_single_string_argument():
+    schema = {
+        'type': 'object',
+        'properties': {'text': {'type': 'string'}},
+        'required': ['text'],
+        'additionalProperties': False,
+    }
+    tool_def = ToolDefinition(name='test', parameters_json_schema=schema)
+    assert tool_def.single_string_argument_name == 'text'
+    assert tool_def.only_takes_string_argument
+
+
+def test_tool_definition_multiple_arguments():
+    schema = {
+        'type': 'object',
+        'properties': {'text': {'type': 'string'}, 'count': {'type': 'integer'}},
+        'required': ['text', 'count'],
+        'additionalProperties': False,
+    }
+    tool_def = ToolDefinition(name='test', parameters_json_schema=schema)
+    assert tool_def.single_string_argument_name is None
+    assert not tool_def.only_takes_string_argument
+
+
+def test_tool_definition_non_string_argument():
+    schema = {
+        'type': 'object',
+        'properties': {'count': {'type': 'integer'}},
+        'required': ['count'],
+        'additionalProperties': False,
+    }
+    tool_def = ToolDefinition(name='test', parameters_json_schema=schema)
+    assert tool_def.single_string_argument_name is None
+    assert not tool_def.only_takes_string_argument
+
+
+def test_tool_definition_no_required_fields():
+    required: list[str] = []
+    schema = {
+        'type': 'object',
+        'properties': {'text': {'type': 'string'}},
+        'required': required,
+        'additionalProperties': False,
+    }
+    tool_def = ToolDefinition(name='test', parameters_json_schema=schema)
+    assert tool_def.single_string_argument_name is None
+    assert not tool_def.only_takes_string_argument
+
+
+def test_tool_definition_no_properties():
+    required: list[str] = []
+    properties: dict[str, dict[str, str]] = {}
+    schema = {'type': 'object', 'properties': properties, 'required': required, 'additionalProperties': False}
+    tool_def = ToolDefinition(name='test', parameters_json_schema=schema)
+    assert tool_def.single_string_argument_name is None
+    assert not tool_def.only_takes_string_argument
+
+
+def test_tool_definition_mismatched_properties_required():
+    schema = {
+        'type': 'object',
+        'properties': {'text': {'type': 'string'}, 'extra': {'type': 'string'}},
+        'required': ['text'],
+        'additionalProperties': False,
+    }
+    tool_def = ToolDefinition(name='test', parameters_json_schema=schema)
+    assert tool_def.single_string_argument_name is None
+    assert not tool_def.only_takes_string_argument
+
+
+def test_agent_tool_with_text_format():
+    agent = Agent(TestModel())
+
+    @agent.tool_plain(text_format='text')
+    def analyze_text(text: str) -> str:
+        return f'Analyzed: {text}'  # pragma: no cover
+
+    tool_def = agent._function_toolset.tools['analyze_text'].tool_def
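+    # The text_format passed to tool_plain should be carried through to the generated ToolDefinition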
+    assert tool_def.text_format == 'text'
+    assert tool_def.only_takes_string_argument
+
+
+def test_agent_tool_with_cfg_format():
+    agent = Agent(TestModel())
+
+    cfg = RegexTextFormat(pattern=r'\d+')
+
+    @agent.tool_plain(text_format=cfg)
+    def parse_numbers(numbers: str) -> str:
+        return f'Parsed: {numbers}'  # pragma: no cover
+
+    tool_def = agent._function_toolset.tools['parse_numbers'].tool_def
+    assert tool_def.text_format == cfg
+
+
 def test_deferred_tool_call_approved_fails():
     def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         return ModelResponse(
diff --git a/uv.lock b/uv.lock
index f496e39ff8..7147ba4e85 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1702,6 +1702,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" },
 ]
 
+[[package]]
+name = "lark"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/1d/37/a13baf0135f348af608c667633cbe5d13aa2c5c15a56ae9ad3e6cba45ae3/lark-1.3.0.tar.gz", hash = "sha256:9a3839d0ca5e1faf7cfa3460e420e859b66bcbde05b634e73c369c8244c5fa48", size = 259551, upload-time = "2025-09-22T13:45:05.072Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/a8/3e/1c6b43277de64fc3c0333b0e72ab7b52ddaaea205210d60d9b9f83c3d0c7/lark-1.3.0-py3-none-any.whl", hash = "sha256:80661f261fb2584a9828a097a2432efd575af27d20be0fd35d17f0fe37253831", size = 113002, upload-time = "2025-09-22T13:45:03.747Z" },
+]
+
 [[package]]
 name = "logfire"
 version = "4.0.0"
@@ -3129,7 +3138,7 @@ wheels = [
 name = "pydantic-ai"
 source = { editable = "." }
 dependencies = [
-    { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"] },
+    { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "lark", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"] },
 ]
 
 [package.optional-dependencies]
@@ -3190,7 +3199,7 @@ lint = [
 requires-dist = [
     { name = "fasta2a", marker = "extra == 'a2a'", specifier = ">=0.4.1" },
     { name = "pydantic-ai-examples", marker = "extra == 'examples'", editable = "examples" },
-    { name = "pydantic-ai-slim", extras = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"], editable = "pydantic_ai_slim" },
+    { name = "pydantic-ai-slim", extras = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "lark", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"], editable = "pydantic_ai_slim" },
     { name = "pydantic-ai-slim", extras = ["dbos"], marker = "extra == 'dbos'", editable = "pydantic_ai_slim" },
 ]
 provides-extras = ["a2a", "dbos", "examples"]
@@ -3333,6 +3342,9 @@ groq = [
 huggingface = [
     { name = "huggingface-hub", extra = ["inference"] },
 ]
+lark = [
+    { name = "lark" },
+]
 logfire = [
     { name = "logfire", extra = ["httpx"] },
 ]
@@ -3377,7 +3389,8 @@ requires-dist = [
     { name = "groq", marker = "extra == 'groq'", specifier = ">=0.25.0" },
     { name = "httpx", specifier = ">=0.27" },
     { name = "huggingface-hub", extras = ["inference"], marker = "extra == 'huggingface'", specifier = ">=0.33.5" },
-    { name = "logfire", extras = ["httpx"], marker = "extra == 'logfire'", specifier = ">=3.14.1" },
+    { name = "lark", marker = "extra == 'lark'", specifier = ">=1.2.2" },
+    { name = "logfire", extras = ["httpx"], marker = "extra == 'logfire'", specifier = ">=3.16.1" },
     { name = "mcp", marker = "extra == 'mcp'", specifier = ">=1.12.3" },
     { name = "mistralai", marker = "extra == 'mistral'", specifier = ">=1.9.10" },
     { name = "openai", marker = "extra == 'openai'", specifier = ">=1.107.2" },
@@ -3395,7 +3408,7 @@ requires-dist = [
     { name = "tenacity", marker = "extra == 'retries'", specifier = ">=8.2.3" },
     { name = "typing-inspection", specifier = ">=0.4.0" },
 ]
-provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "tavily", "temporal", "vertexai"]
+provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "duckduckgo", "evals", "google", "groq", "huggingface", "lark", "logfire", "mcp", "mistral", "openai", "retries", "tavily", "temporal", "vertexai"]
 
 [[package]]
 name = "pydantic-core"