Closed
Changes from 8 commits

Commits
89 commits
c713a6e
infer custom tool format from schema
matthewfranglen Aug 13, 2025
de3bc18
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Aug 15, 2025
e7ca5ca
update free_form to be a parameter, set parallel_tool_calls
matthewfranglen Aug 15, 2025
f0a5cbe
Map the response type
matthewfranglen Aug 15, 2025
68fc7cf
Fix assertion ordering, remove some intermediate variables
matthewfranglen Aug 15, 2025
5d9af16
add free_form output
matthewfranglen Aug 15, 2025
3687580
add context free grammar to free form function calling
matthewfranglen Aug 15, 2025
61f7291
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Aug 15, 2025
7a869b9
get the grammar working on the output
matthewfranglen Aug 15, 2025
5e8cef9
use FunctionTextFormat object to hold fffc/cfg settings
matthewfranglen Aug 15, 2025
79b519a
add literal text as an option for text_format
matthewfranglen Aug 15, 2025
991c01d
remove parameter added in error
matthewfranglen Aug 15, 2025
c70bc1d
address some of the pyright errors
matthewfranglen Aug 15, 2025
9586e6c
remove default value
matthewfranglen Aug 15, 2025
0b47135
drop pedantic check
matthewfranglen Aug 15, 2025
92db07a
update snapshots
matthewfranglen Aug 15, 2025
3e605e0
update docstrings
matthewfranglen Aug 15, 2025
ab45262
update snapshots
matthewfranglen Aug 15, 2025
3982f32
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Aug 29, 2025
f3595b3
work on tests
matthewfranglen Aug 29, 2025
bef5dec
reviewing some of the new tests
matthewfranglen Aug 29, 2025
21e1a0b
typing
matthewfranglen Aug 29, 2025
cfcf7cf
update snapshots
matthewfranglen Aug 29, 2025
1c5c500
more generated tests
matthewfranglen Aug 31, 2025
e0017b4
fix up tests for tools.py
matthewfranglen Aug 31, 2025
307b011
add lark
matthewfranglen Aug 31, 2025
131ab91
use find_spec to see if lark resolves
matthewfranglen Aug 31, 2025
b386eb6
add runtime validation of syntax
matthewfranglen Aug 31, 2025
01988a5
Can't throw the exception and maintain coverage
matthewfranglen Aug 31, 2025
88b8b28
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Aug 31, 2025
3a46eea
revert the uv.lock
matthewfranglen Aug 31, 2025
c084523
use keyword arguments
matthewfranglen Sep 1, 2025
ef1a696
add missing property decorator
matthewfranglen Sep 1, 2025
e3f514d
remove deprecated setting
matthewfranglen Sep 2, 2025
0dbcdaf
update snapshot
matthewfranglen Sep 2, 2025
4533df1
get coverage on the tests up to 100%
matthewfranglen Sep 2, 2025
fc477bb
review the openai tests
matthewfranglen Sep 2, 2025
4cacf9b
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Sep 3, 2025
185c929
fiddling with tests
matthewfranglen Sep 4, 2025
a81e7b9
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Sep 4, 2025
4a6e540
add simple test for output tool
matthewfranglen Sep 4, 2025
8714253
remove utc import
matthewfranglen Sep 4, 2025
b55c9ab
use older utc
matthewfranglen Sep 4, 2025
b347edc
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Sep 4, 2025
997d2ac
formatting
matthewfranglen Sep 4, 2025
afdd6ef
drop NOT_GIVEN import
matthewfranglen Sep 4, 2025
175eee9
formatting
matthewfranglen Sep 4, 2025
19eb167
address linter errors
matthewfranglen Sep 4, 2025
6e259c8
TypeError: Logfire.instrument_pydantic_ai() got an unexpected keyword…
matthewfranglen Sep 4, 2025
742fb91
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Sep 5, 2025
d69daad
remove the condition over the output tools
matthewfranglen Sep 5, 2025
d78a5c2
remove redundant line pragma
matthewfranglen Sep 5, 2025
dc1c182
move the no cover line
matthewfranglen Sep 5, 2025
d1fb3a4
formatting
matthewfranglen Sep 5, 2025
97b4d82
revert version change
matthewfranglen Sep 5, 2025
c54f26e
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Sep 6, 2025
e206e3e
start on documentation
matthewfranglen Sep 6, 2025
c483fda
test the examples manually, fix errors
matthewfranglen Sep 6, 2025
5265211
no fancy comma
matthewfranglen Sep 6, 2025
f337820
fix bad syntax in example
matthewfranglen Sep 6, 2025
2c7367f
Add section on output tool use
matthewfranglen Sep 6, 2025
f3a4afd
double to single quotes
matthewfranglen Sep 6, 2025
c4665a2
fix the output_tool
matthewfranglen Sep 6, 2025
b86d2b1
actually use a lark grammar
matthewfranglen Sep 6, 2025
d713c29
Update docs/models/openai.md
matthewfranglen Sep 9, 2025
d0c346c
Update pydantic_ai_slim/pydantic_ai/models/openai.py
matthewfranglen Sep 9, 2025
3037e6e
make the introduction to cfg stronger
matthewfranglen Sep 9, 2025
4e2264d
make FunctionTextFormat directly importable from pydantic_ai
matthewfranglen Sep 9, 2025
e28836b
use direct import
matthewfranglen Sep 9, 2025
b49cd81
add headings
matthewfranglen Sep 9, 2025
a7112f4
of course there was an easier way to do this
matthewfranglen Sep 9, 2025
7c96803
quote coding terms
matthewfranglen Sep 9, 2025
5d2b372
free-form -> freeform
matthewfranglen Sep 9, 2025
ec057c8
gpt -> GPT or quoted
matthewfranglen Sep 9, 2025
c949c83
free form -> freeform
matthewfranglen Sep 9, 2025
36a0759
Merge branch 'main' into freeform-and-cfg-tools
matthewfranglen Oct 12, 2025
ae64d6b
fix imports
matthewfranglen Oct 12, 2025
f5ca42e
add missing import
matthewfranglen Oct 12, 2025
e7bea60
snapshot updates
matthewfranglen Oct 12, 2025
637774f
update regex match over error message
matthewfranglen Oct 12, 2025
9cf0931
add more known model names
matthewfranglen Oct 12, 2025
8c6c976
fix uv.lock, undo some of the changes
matthewfranglen Oct 12, 2025
febe88d
another snapshot update related to uv.lock
matthewfranglen Oct 12, 2025
3106219
text_format text -> plain
matthewfranglen Oct 13, 2025
673ef1e
default to an argument name of input
matthewfranglen Oct 13, 2025
2581873
use !r formatting for tool name
matthewfranglen Oct 13, 2025
9ec5b69
link to best practices
matthewfranglen Oct 13, 2025
3927cf0
FunctionTextFormat -> TextFormat, change handling
matthewfranglen Oct 13, 2025
5990d8b
Update a test to check for unknown tool mapping
matthewfranglen Oct 13, 2025
9 changes: 8 additions & 1 deletion pydantic_ai_slim/pydantic_ai/_output.py
@@ -591,6 +591,7 @@ class OutputObjectDefinition:
name: str | None = None
description: str | None = None
strict: bool | None = None
free_form: bool = False


@dataclass(init=False)
@@ -621,6 +622,7 @@ def __init__(
name: str | None = None,
description: str | None = None,
strict: bool | None = None,
free_form: bool = False,
):
if inspect.isfunction(output) or inspect.ismethod(output):
self._function_schema = _function_schema.function_schema(output, GenerateToolJsonSchema)
@@ -663,6 +665,7 @@ def __init__(
description=description,
json_schema=json_schema,
strict=strict,
free_form=free_form,
)

async def process(
@@ -925,14 +928,17 @@ def build(
name = output.name
description = output.description
strict = output.strict
free_form = output.free_form

output = output.output

description = description or default_description
if strict is None:
strict = default_strict

processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
processor = ObjectOutputProcessor(
output=output, description=description, strict=strict, free_form=free_form
)
object_def = processor.object_def

if name is None:
@@ -957,6 +963,7 @@ def build(
description=description,
parameters_json_schema=object_def.json_schema,
strict=object_def.strict,
free_form=object_def.free_form,
outer_typed_dict_key=processor.outer_typed_dict_key,
kind='output',
)
32 changes: 31 additions & 1 deletion pydantic_ai_slim/pydantic_ai/agent/__init__.py
@@ -8,7 +8,7 @@
from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import TYPE_CHECKING, Any, Callable, ClassVar, cast, overload
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Literal, cast, overload

from opentelemetry.trace import NoOpTracer, use_span
from pydantic.json_schema import GenerateJsonSchema
@@ -963,6 +963,9 @@ def tool(
require_parameter_descriptions: bool = False,
schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
strict: bool | None = None,
free_form: bool | None = None,
grammar_syntax: Literal['regex', 'lark'] | None = None,
grammar_definition: str | None = None,
) -> Callable[[ToolFuncContext[AgentDepsT, ToolParams]], ToolFuncContext[AgentDepsT, ToolParams]]: ...

def tool(
@@ -977,6 +980,9 @@ def tool(
require_parameter_descriptions: bool = False,
schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
strict: bool | None = None,
free_form: bool | None = None,
grammar_syntax: Literal['regex', 'lark'] | None = None,
grammar_definition: str | None = None,
Collaborator:
@matthewfranglen I know you still have some work planned on this PR before it's really ready for review, but please consider the API I proposed in #2513 (comment). I'd prefer one argument taking an object over 3 that need to be used together.

Contributor Author:
Sorry I missed your intent with that part. It's a very good idea, I will certainly do that, and it will clean up what I have done so far. Thanks for the reminder.

Contributor Author:
I believe I have addressed this now.
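# A sketch of the single-object API suggested above (not part of this diff); the
# `TextFormat` name and its fields are assumptions based on the later commit messages
# (`FunctionTextFormat -> TextFormat`), not the final design.
from dataclasses import dataclass
from typing import Literal

@dataclass
class TextFormat:
    """Grouped settings for freeform (custom) tool calls."""
    syntax: Literal['regex', 'lark'] | None = None  # grammar syntax, if any
    definition: str | None = None  # grammar definition written in that syntax

# A decorator would then accept a single `text_format: TextFormat | None = None`
# keyword argument in place of the three separate keyword arguments above.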
) -> Any:
"""Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

@@ -1021,6 +1027,12 @@ async def spam(ctx: RunContext[str], y: float) -> float:
schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
strict: Whether to enforce JSON schema compliance (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
free_form: Whether to invoke the function using free-form function calling (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
grammar_syntax: The grammar syntax, either `'regex'` or `'lark'`, used to restrict the free-form function calling argument (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
grammar_definition: The grammar definition, written in the chosen `grammar_syntax`, that the free-form function calling argument must conform to (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
"""

def tool_decorator(
@@ -1037,6 +1049,9 @@ def tool_decorator(
require_parameter_descriptions,
schema_generator,
strict,
free_form,
grammar_syntax,
grammar_definition,
)
return func_

@@ -1057,6 +1072,9 @@ def tool_plain(
require_parameter_descriptions: bool = False,
schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
strict: bool | None = None,
free_form: bool | None = None,
grammar_syntax: Literal['regex', 'lark'] | None = None,
grammar_definition: str | None = None,
) -> Callable[[ToolFuncPlain[ToolParams]], ToolFuncPlain[ToolParams]]: ...

def tool_plain(
@@ -1071,6 +1089,9 @@ def tool_plain(
require_parameter_descriptions: bool = False,
schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
strict: bool | None = None,
free_form: bool | None = None,
grammar_syntax: Literal['regex', 'lark'] | None = None,
grammar_definition: str | None = None,
) -> Any:
"""Decorator to register a tool function which DOES NOT take `RunContext` as an argument.

@@ -1115,6 +1136,12 @@ async def spam(ctx: RunContext[str]) -> float:
schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
strict: Whether to enforce JSON schema compliance (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
free_form: Whether to invoke the function using free-form function calling (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
grammar_syntax: The grammar syntax, either `'regex'` or `'lark'`, used to restrict the free-form function calling argument (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
grammar_definition: The grammar definition, written in the chosen `grammar_syntax`, that the free-form function calling argument must conform to (only affects OpenAI).
See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
"""

def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams]:
Expand All @@ -1129,6 +1156,9 @@ def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams
require_parameter_descriptions,
schema_generator,
strict,
free_form,
grammar_syntax,
grammar_definition,
)
return func_

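Taken together, the new decorator parameters can be used like this. A minimal sketch based on the signatures in this diff, where the model name, the grammar, and the tool body are illustrative assumptions; freeform calling also requires a model whose profile enables it, per the `profiles/openai.py` change below:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel

# Lark grammar constraining the argument to a coordinate-notation chess move.
MOVE_GRAMMAR = '''
start: MOVE
MOVE: /[a-h][1-8][a-h][1-8]/
'''

agent = Agent(OpenAIResponsesModel('gpt-5'))

@agent.tool_plain(free_form=True, grammar_syntax='lark', grammar_definition=MOVE_GRAMMAR)
def play_move(move: str) -> str:
    """Apply a chess move given in coordinate notation, e.g. 'e2e4'."""
    return f'played {move}'

Because the tool is freeform, the model sends the raw string for `move` rather than a JSON object, and the grammar constrains that string to a valid move.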
60 changes: 51 additions & 9 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -313,7 +313,7 @@ async def request(
response = await self._completions_create(
messages, False, cast(OpenAIModelSettings, model_settings or {}), model_request_parameters
)
model_response = self._process_response(response)
model_response = self._process_response(response, model_request_parameters)
return model_response

@asynccontextmanager
@@ -762,7 +762,7 @@ async def request(
response = await self._responses_create(
messages, False, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
)
return self._process_response(response)
return self._process_response(response, model_request_parameters)

@asynccontextmanager
async def request_stream(
@@ -779,7 +779,11 @@ async def request_stream(
async with response:
yield await self._process_streamed_response(response, model_request_parameters)

def _process_response(self, response: responses.Response) -> ModelResponse:
def _process_response(
self,
response: responses.Response,
model_request_parameters: ModelRequestParameters,
) -> ModelResponse:
"""Process a non-streamed response, and prepare a message to return."""
timestamp = number_to_datetime(response.created_at)
items: list[ModelResponsePart] = []
@@ -795,6 +799,12 @@ def _process_response(self, response: responses.Response) -> ModelResponse:
items.append(TextPart(content.text))
elif item.type == 'function_call':
items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
elif item.type == 'custom_tool_call':
if item.name not in model_request_parameters.tool_defs:
raise UnexpectedModelBehavior(f'Unknown tool called: {item.name}')
Collaborator:
I'd prefer to handle this the same as we currently handle invalid tool calls, inside CallToolsNode. So let's create a ToolCallPart anyway; we can just call the argument `input` if we don't know the intended name.
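# A sketch of the reviewer's suggestion above (assumed, not part of this diff): always
# emit a ToolCallPart and fall back to an 'input' argument name for unknown tools, so
# the bad call is reported later by CallToolsNode instead of raising here. This assumes
# `tool_defs` is a mapping, as the surrounding lines suggest.
elif item.type == 'custom_tool_call':
    tool = model_request_parameters.tool_defs.get(item.name)
    argument_name = tool.single_string_argument_name if tool is not None else 'input'
    items.append(ToolCallPart(item.name, {argument_name: item.input}, tool_call_id=item.call_id))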

tool = model_request_parameters.tool_defs[item.name]
argument_name = tool.single_string_argument_name
items.append(ToolCallPart(item.name, {argument_name: item.input}, tool_call_id=item.call_id))
return ModelResponse(
items,
usage=_map_usage(response),
@@ -893,11 +903,14 @@ async def _responses_create(
try:
extra_headers = model_settings.get('extra_headers', {})
extra_headers.setdefault('User-Agent', get_user_agent())
parallel_tool_calls = self._get_parallel_tool_calling(
model_settings=model_settings, model_request_parameters=model_request_parameters
)
return await self.client.responses.create(
input=openai_messages,
model=self._model_name,
instructions=instructions,
parallel_tool_calls=model_settings.get('parallel_tool_calls', NOT_GIVEN),
parallel_tool_calls=parallel_tool_calls,
tools=tools or NOT_GIVEN,
tool_choice=tool_choice or NOT_GIVEN,
max_output_tokens=model_settings.get('max_tokens', NOT_GIVEN),
@@ -937,7 +950,18 @@ def _get_reasoning(self, model_settings: OpenAIResponsesModelSettings) -> Reason
return NOT_GIVEN
return Reasoning(effort=reasoning_effort, summary=reasoning_summary)

def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.FunctionToolParam]:
def _get_parallel_tool_calling(
self, model_settings: OpenAIResponsesModelSettings, model_request_parameters: ModelRequestParameters
) -> bool | NotGiven:
if any(tool_definition.free_form for tool_definition in model_request_parameters.tool_defs.values()):
return False
if any(tool_definition.free_form for tool_definition in model_request_parameters.output_tools):
return False
return model_settings.get('parallel_tool_calls', NOT_GIVEN)

def _get_tools(
self, model_request_parameters: ModelRequestParameters
) -> list[responses.FunctionToolParam | responses.CustomToolParam]:
return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()]

def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.ToolParam]:
@@ -960,15 +984,33 @@ def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) -
)
return tools

def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam:
def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam | responses.CustomToolParam:
model_profile = OpenAIModelProfile.from_profile(self.profile)
if f.free_form:
if not model_profile.openai_supports_freeform_function_calling:
raise UserError(
f'`{f.name}` is set as free_form but {model_profile.name} does not support free form function calling.'
)
if not f.only_takes_string_argument:
Collaborator:
Could we enforce this at the tool definition level, in the Tool class or something?
raise UserError(f'`{f.name}` is set as free_form but does not take a single string argument.')
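# A sketch of the check suggested above, moved to definition time (assumed, not part of
# this diff): if ToolDefinition is a dataclass exposing the `free_form` and
# `only_takes_string_argument` members referenced here, a __post_init__ hook could
# reject the bad combination before any request is built.
def __post_init__(self) -> None:
    if self.free_form and not self.only_takes_string_argument:
        raise UserError(f'{self.name!r} is set as free_form but does not take a single string argument.')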
if f.grammar_syntax is not None:
format = {'type': 'grammar', 'syntax': f.grammar_syntax, 'definition': f.grammar_definition}
else:
format = {'type': 'text'}
tool_param: responses.CustomToolParam = {
'name': f.name,
'type': 'custom',
'description': f.description or '',
'format': format,
}
return tool_param

return {
'name': f.name,
'parameters': f.parameters_json_schema,
'type': 'function',
'description': f.description,
'strict': bool(
f.strict and OpenAIModelProfile.from_profile(self.profile).openai_supports_strict_tool_definition
),
'strict': bool(f.strict and model_profile.openai_supports_strict_tool_definition),
}

async def _map_messages(
4 changes: 4 additions & 0 deletions pydantic_ai_slim/pydantic_ai/output.py
@@ -112,6 +112,8 @@ class Vehicle(BaseModel):
"""The maximum number of retries for the tool."""
strict: bool | None
"""Whether to use strict mode for the tool."""
free_form: bool
"""Whether to invoke the function with free-form function calling for tool calls."""

def __init__(
self,
@@ -121,12 +123,14 @@ def __init__(
description: str | None = None,
max_retries: int | None = None,
strict: bool | None = None,
free_form: bool = False,
):
self.output = type_
self.name = name
self.description = description
self.max_retries = max_retries
self.strict = strict
self.free_form = free_form


@dataclass(init=False)
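The new `free_form` field also applies to output tools. A minimal sketch based on the constructor above, where the model and the plain-text output function are illustrative assumptions:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel
from pydantic_ai.output import ToolOutput

def final_answer(answer: str) -> str:
    """Accept the final answer as plain text."""
    return answer.strip()

agent = Agent(
    OpenAIResponsesModel('gpt-5'),
    output_type=ToolOutput(final_answer, name='final_answer', free_form=True),
)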
7 changes: 7 additions & 0 deletions pydantic_ai_slim/pydantic_ai/profiles/openai.py
@@ -33,10 +33,16 @@ class OpenAIModelProfile(ModelProfile):
openai_system_prompt_role: OpenAISystemPromptRole | None = None
"""The role to use for the system prompt message. If not provided, defaults to `'system'`."""

# GPT-5 introduced support for directly calling a function with a string.
openai_supports_freeform_function_calling: bool = False
"""Whether the provider accepts the value ``type='custom'`` for tools in the
request payload."""


def openai_model_profile(model_name: str) -> ModelProfile:
"""Get the model profile for an OpenAI model."""
is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5')
is_freeform_function_calling_model = model_name.startswith('gpt-5')
# Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later.
# We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
# when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
@@ -50,6 +56,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
supports_json_schema_output=True,
supports_json_object_output=True,
openai_supports_sampling_settings=not is_reasoning_model,
openai_supports_freeform_function_calling=is_freeform_function_calling_model,
openai_system_prompt_role=openai_system_prompt_role,
)

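A quick way to exercise the new profile flag; a sketch using the helper defined above, with the model names only as examples:

from pydantic_ai.profiles.openai import OpenAIModelProfile, openai_model_profile

gpt5_profile = OpenAIModelProfile.from_profile(openai_model_profile('gpt-5'))
gpt4o_profile = OpenAIModelProfile.from_profile(openai_model_profile('gpt-4o'))

assert gpt5_profile.openai_supports_freeform_function_calling
assert not gpt4o_profile.openai_supports_freeform_function_calling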