Commit 65f7b81

Authored by TamiTakamiya, github-actions[bot], wukaixingxp, and ashwinb
feat: Add items and title to ToolParameter/ToolParamDefinition (#3003)
# What does this PR do?

Add `items` and `title` to ToolParameter/ToolParamDefinition. Adding `items` resolves the failure that occurs with the Gemini LLM when an MCP tool has array-type properties, since Gemini rejects array parameters that are declared without an `items` sub-schema.

## Test Plan

Unit test cases will be added.

---------

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Co-authored-by: Kai Wu <[email protected]>
Co-authored-by: Ashwin Bharambe <[email protected]>
1 parent 1a8d3ed commit 65f7b81

File tree

16 files changed: +1835 -9 lines changed
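
Before the per-file diffs, a hedged sketch of the failure this commit addresses. The parameter name and schema below are illustrative, not taken from the PR: Gemini rejects function declarations whose array-typed parameters arrive without an `items` sub-schema, which is what happened when MCP array properties were flattened to a bare `array` type.

# Illustrative only: how an MCP array property surfaced to a provider
# before and after this change. The "urls" parameter is hypothetical.
before = {
    "urls": {
        "type": "array",  # no "items" -> rejected by Gemini
        "description": "Pages to fetch",
    }
}
after = {
    "urls": {
        "type": "array",
        "description": "Pages to fetch",
        "items": {"type": "string"},  # element schema now preserved
        "title": "Urls",              # optional display title
    }
}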

docs/static/llama-stack-spec.html

Lines changed: 33 additions & 0 deletions
@@ -6783,6 +6783,31 @@
             "type": "boolean",
             "default": true
           },
+          "items": {
+            "oneOf": [
+              {
+                "type": "null"
+              },
+              {
+                "type": "boolean"
+              },
+              {
+                "type": "number"
+              },
+              {
+                "type": "string"
+              },
+              {
+                "type": "array"
+              },
+              {
+                "type": "object"
+              }
+            ]
+          },
+          "title": {
+            "type": "string"
+          },
           "default": {
             "oneOf": [
               {
@@ -7457,6 +7482,14 @@
             "default": true,
             "description": "Whether this parameter is required for tool invocation"
           },
+          "items": {
+            "type": "object",
+            "description": "Type of the elements when parameter_type is array"
+          },
+          "title": {
+            "type": "string",
+            "description": "(Optional) Title of the parameter"
+          },
           "default": {
             "oneOf": [
               {

docs/static/llama-stack-spec.yaml

Lines changed: 17 additions & 0 deletions
@@ -4866,6 +4866,16 @@ components:
        required:
          type: boolean
          default: true
+        items:
+          oneOf:
+            - type: 'null'
+            - type: boolean
+            - type: number
+            - type: string
+            - type: array
+            - type: object
+        title:
+          type: string
        default:
          oneOf:
            - type: 'null'
@@ -5403,6 +5413,13 @@ components:
          default: true
          description: >-
            Whether this parameter is required for tool invocation
+        items:
+          type: object
+          description: >-
+            Type of the elements when parameter_type is array
+        title:
+          type: string
+          description: (Optional) Title of the parameter
        default:
          oneOf:
            - type: 'null'

llama_stack/apis/tools/tools.py

Lines changed: 4 additions & 0 deletions
@@ -27,13 +27,17 @@ class ToolParameter(BaseModel):
     :param parameter_type: Type of the parameter (e.g., string, integer)
     :param description: Human-readable description of what the parameter does
     :param required: Whether this parameter is required for tool invocation
+    :param items: Type of the elements when parameter_type is array
+    :param title: (Optional) Title of the parameter
     :param default: (Optional) Default value for the parameter if not provided
     """

     name: str
     parameter_type: str
     description: str
     required: bool = Field(default=True)
+    items: dict | None = None
+    title: str | None = None
     default: Any | None = None

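A minimal usage sketch of the extended model (import path taken from the file location above; the parameter values are illustrative):

from llama_stack.apis.tools.tools import ToolParameter

# An array-typed parameter can now carry its element schema and a title.
param = ToolParameter(
    name="urls",
    parameter_type="array",
    description="Pages to fetch",
    items={"type": "string"},
    title="Urls",
)

# Both new fields default to None, so existing callers are unaffected.
assert ToolParameter(name="q", parameter_type="string", description="Query").items is None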

llama_stack/models/llama/datatypes.py

Lines changed: 2 additions & 0 deletions
@@ -92,6 +92,8 @@ class ToolParamDefinition(BaseModel):
     param_type: str
     description: str | None = None
     required: bool | None = True
+    items: Any | None = None
+    title: str | None = None
     default: Any | None = None

9799

llama_stack/providers/inline/agents/meta_reference/agent_instance.py

Lines changed: 4 additions & 0 deletions
@@ -798,6 +798,8 @@ async def _initialize_tools(
                         param_type=param.parameter_type,
                         description=param.description,
                         required=param.required,
+                        items=param.items,
+                        title=param.title,
                         default=param.default,
                     )
                     for param in tool_def.parameters
@@ -841,6 +843,8 @@ async def _initialize_tools(
                         param_type=param.parameter_type,
                         description=param.description,
                         required=param.required,
+                        items=param.items,
+                        title=param.title,
                         default=param.default,
                     )
                     for param in tool_def.parameters
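
The conversion above threads the two new fields from each `ToolParameter` into a `ToolParamDefinition`. A hedged, self-contained sketch of that shape (the mapping key and surrounding agent context are assumptions, since the diff does not show them):

from llama_stack.apis.tools.tools import ToolParameter
from llama_stack.models.llama.datatypes import ToolParamDefinition

# Stand-in for tool_def.parameters in _initialize_tools above.
parameters = [
    ToolParameter(
        name="urls",
        parameter_type="array",
        description="Pages to fetch",
        items={"type": "string"},
        title="Urls",
    )
]

params = {
    param.name: ToolParamDefinition(
        param_type=param.parameter_type,
        description=param.description,
        required=param.required,
        items=param.items,   # now threaded through
        title=param.title,   # now threaded through
        default=param.default,
    )
    for param in parameters
}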

llama_stack/providers/utils/inference/openai_compat.py

Lines changed: 4 additions & 0 deletions
@@ -805,6 +805,10 @@ def convert_tooldef_to_openai_tool(tool: ToolDefinition) -> dict:
             properties[param_name].update(description=param.description)
         if param.default:
             properties[param_name].update(default=param.default)
+        if param.items:
+            properties[param_name].update(items=param.items)
+        if param.title:
+            properties[param_name].update(title=param.title)
         if param.required:
             required.append(param_name)

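A self-contained sketch of the guarded-update pattern above (stand-in variables in place of `param.items` and `param.title`; note the guards test truthiness, so an empty `items` dict or empty `title` string would be skipped):

# Stand-ins for the param object and the properties dict built above.
properties: dict[str, dict] = {"urls": {"type": "array"}}
items = {"type": "string"}
title = "Urls"

if items:
    properties["urls"].update(items=items)
if title:
    properties["urls"].update(title=title)

assert properties["urls"] == {
    "type": "array",
    "items": {"type": "string"},
    "title": "Urls",
}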

llama_stack/providers/utils/tools/mcp.py

Lines changed: 4 additions & 0 deletions
@@ -120,6 +120,10 @@ async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> ListToolDefs
                     name=param_name,
                     parameter_type=param_schema.get("type", "string"),
                     description=param_schema.get("description", ""),
+                    required="default" not in param_schema,
+                    items=param_schema.get("items", None),
+                    title=param_schema.get("title", None),
+                    default=param_schema.get("default", None),
                 )
             )
         tools.append(
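
What the mapping above yields for one property of a hypothetical MCP tool; note that `required` is now inferred from the absence of a `default` in the property schema:

# Hypothetical property schema as returned by an MCP tool listing.
param_name = "urls"
param_schema = {"type": "array", "title": "Urls", "items": {"type": "string"}}

kwargs = dict(
    name=param_name,
    parameter_type=param_schema.get("type", "string"),
    description=param_schema.get("description", ""),
    required="default" not in param_schema,  # no default => required
    items=param_schema.get("items", None),
    title=param_schema.get("title", None),
    default=param_schema.get("default", None),
)
assert kwargs["required"] is True
assert kwargs["items"] == {"type": "string"}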

tests/common/mcp.py

Lines changed: 26 additions & 4 deletions
@@ -167,6 +167,8 @@ def make_mcp_server(required_auth_token: str | None = None, tools: dict[str, Callable] | None = None):
     from starlette.responses import Response
     from starlette.routing import Mount, Route

+    from llama_stack.log import get_logger
+
     server = FastMCP("FastMCP Test Server", log_level="WARNING")

     tools = tools or default_tools()
@@ -211,52 +213,72 @@ def get_open_port():
         return sock.getsockname()[1]

     port = get_open_port()
+    logger = get_logger(__name__, category="tests::mcp")

     # make uvicorn logs be less verbose
     config = uvicorn.Config(app, host="0.0.0.0", port=port, log_level="warning")
     server_instance = uvicorn.Server(config)
     app.state.uvicorn_server = server_instance

     def run_server():
-        server_instance.run()
+        try:
+            logger.info(f"Starting MCP server on port {port}")
+            server_instance.run()
+            logger.info(f"MCP server on port {port} has stopped")
+        except Exception as e:
+            logger.error(f"MCP server failed to start on port {port}: {e}")
+            raise

     # Start the server in a new thread
     server_thread = threading.Thread(target=run_server, daemon=True)
+    logger.info(f"Starting MCP server thread on port {port}")
     server_thread.start()

     # Polling until the server is ready
     timeout = 10
     start_time = time.time()

     server_url = f"http://localhost:{port}/sse"
+    logger.info(f"Waiting for MCP server to be ready at {server_url}")
+
     while time.time() - start_time < timeout:
         try:
             response = httpx.get(server_url)
             if response.status_code in [200, 401]:
+                logger.info(f"MCP server is ready on port {port} (status: {response.status_code})")
                 break
-        except httpx.RequestError:
+        except httpx.RequestError as e:
+            logger.debug(f"Server not ready yet, retrying... ({e})")
             pass
         time.sleep(0.1)
+    else:
+        # If we exit the loop due to timeout
+        logger.error(f"MCP server failed to start within {timeout} seconds on port {port}")
+        logger.error(f"Thread alive: {server_thread.is_alive()}")
+        if server_thread.is_alive():
+            logger.error("Server thread is still running but not responding to HTTP requests")

     try:
         yield {"server_url": server_url}
     finally:
+        logger.info(f"Shutting down MCP server on port {port}")
         server_instance.should_exit = True
         time.sleep(0.5)

         # Force shutdown if still running
         if server_thread.is_alive():
             try:
+                logger.info("Force shutting down server thread")
                 if hasattr(server_instance, "servers") and server_instance.servers:
                     for srv in server_instance.servers:
                         srv.close()

                 # Wait for graceful shutdown
                 server_thread.join(timeout=3)
                 if server_thread.is_alive():
-                    print("Warning: Server thread still alive after shutdown attempt")
+                    logger.warning("Server thread still alive after shutdown attempt")
             except Exception as e:
-                print(f"Error during server shutdown: {e}")
+                logger.error(f"Error during server shutdown: {e}")

         # CRITICAL: Reset SSE global state to prevent event loop contamination
         # Reset the SSE AppStatus singleton that stores anyio.Event objects
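
For orientation, a hedged usage sketch of this fixture: the `yield {"server_url": ...}` / `finally` structure suggests it is consumed as a context manager (the decorator is outside this diff), and startup/shutdown progress is now logged through `llama_stack.log` instead of `print`:

# Illustrative only; assumes make_mcp_server is context-manager wrapped,
# as its yield/finally structure suggests.
from tests.common.mcp import make_mcp_server

with make_mcp_server() as server_info:
    print(server_info["server_url"])  # e.g. "http://localhost:<port>/sse"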
Lines changed: 167 additions & 0 deletions
@@ -0,0 +1,167 @@
+{
+  "request": {
+    "method": "POST",
+    "url": "http://localhost:11434/api/generate",
+    "headers": {},
+    "body": {
+      "model": "llama3.2:3b-instruct-fp16",
+      "raw": true,
+      "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n    {\n        \"name\": \"greet_everyone\",\n        \"description\": \"\",\n        \"parameters\": {\n            \"type\": \"dict\",\n            \"required\": [\"url\"],\n            \"properties\": {\n                \"url\": {\n                    \"type\": \"string\",\n                    \"description\": \"\"\n                }\n            }\n        }\n    },\n    {\n        \"name\": \"get_boiling_point\",\n        \"description\": \"\n    Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n    :param liquid_name: The name of the liquid\n    :param celsius: Whether to return the boiling point in Celsius\n    :return: The boiling point of the liquid in Celcius or Fahrenheit\n    \",\n        \"parameters\": {\n            \"type\": \"dict\",\n            \"required\": [\"liquid_name\"],\n            \"properties\": {\n                \"liquid_name\": {\n                    \"type\": \"string\",\n                    \"description\": \"\"\n                },\n                \"celsius\": {\n                    \"type\": \"boolean\",\n                    \"description\": \"\",\n                    \"default\": \"True\"\n                }\n            }\n        }\n    }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
+      "options": {
+        "temperature": 0.0
+      },
+      "stream": true
+    },
+    "endpoint": "/api/generate",
+    "model": "llama3.2:3b-instruct-fp16"
+  },
+  "response": {
+    "body": [
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.663224Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": "How",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.706706Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": " can",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.751075Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": " I",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.794187Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": " assist",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.837831Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": " you",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.879926Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": " further",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.92182Z",
+          "done": false,
+          "done_reason": null,
+          "total_duration": null,
+          "load_duration": null,
+          "prompt_eval_count": null,
+          "prompt_eval_duration": null,
+          "eval_count": null,
+          "eval_duration": null,
+          "response": "?",
+          "thinking": null,
+          "context": null
+        }
+      },
+      {
+        "__type__": "ollama._types.GenerateResponse",
+        "__data__": {
+          "model": "llama3.2:3b-instruct-fp16",
+          "created_at": "2025-09-27T18:05:56.963339Z",
+          "done": true,
+          "done_reason": "stop",
+          "total_duration": 492973041,
+          "load_duration": 103979375,
+          "prompt_eval_count": 482,
+          "prompt_eval_duration": 87032041,
+          "eval_count": 8,
+          "eval_duration": 300586375,
+          "response": "",
+          "thinking": null,
+          "context": null
+        }
+      }
+    ],
+    "is_streaming": true
+  }
+}
