@@ -70,10 +70,13 @@ def trace_tool_call(
70
70
function_response_event: The event with the function response details.
71
71
"""
72
72
span = trace .get_current_span ()
73
- span .set_attribute ('gen_ai.system' , 'gcp.vertex.agent' )
73
+
74
+ # Standard OpenTelemetry GenAI attributes as of OTel SemConv v1.36.0 for Agents and Frameworks
75
+ span .set_attribute ('gen_ai.system' , 'gcp.vertex_ai' )
74
76
span .set_attribute ('gen_ai.operation.name' , 'execute_tool' )
75
77
span .set_attribute ('gen_ai.tool.name' , tool .name )
76
78
span .set_attribute ('gen_ai.tool.description' , tool .description )
79
+
77
80
tool_call_id = '<not specified>'
78
81
tool_response = '<not specified>'
79
82
if function_response_event .content .parts :
@@ -86,6 +89,7 @@ def trace_tool_call(
86
89
87
90
span .set_attribute ('gen_ai.tool.call.id' , tool_call_id )
88
91
92
+ # Vendor-specific attributes (moved from gen_ai.* to gcp.vertex.agent.*)
89
93
if not isinstance (tool_response , dict ):
90
94
tool_response = {'result' : tool_response }
91
95
span .set_attribute (
@@ -121,12 +125,15 @@ def trace_merged_tool_calls(
121
125
"""
122
126
123
127
span = trace .get_current_span ()
124
- span .set_attribute ('gen_ai.system' , 'gcp.vertex.agent' )
128
+
129
+ # Standard OpenTelemetry GenAI attributes
130
+ span .set_attribute ('gen_ai.system' , 'gcp.vertex_ai' )
125
131
span .set_attribute ('gen_ai.operation.name' , 'execute_tool' )
126
132
span .set_attribute ('gen_ai.tool.name' , '(merged tools)' )
127
133
span .set_attribute ('gen_ai.tool.description' , '(merged tools)' )
128
134
span .set_attribute ('gen_ai.tool.call.id' , response_event_id )
129
135
136
+ # Vendor-specific attributes
130
137
span .set_attribute ('gcp.vertex.agent.tool_call_args' , 'N/A' )
131
138
span .set_attribute ('gcp.vertex.agent.event_id' , response_event_id )
132
139
try :
@@ -167,23 +174,37 @@ def trace_call_llm(
167
174
llm_response: The LLM response object.
168
175
"""
169
176
span = trace .get_current_span ()
170
- # Special standard Open Telemetry GenaI attributes that indicate
171
- # that this is a span related to a Generative AI system.
172
- span .set_attribute ('gen_ai.system' , 'gcp.vertex.agent ' )
177
+
178
+ # Standard OpenTelemetry GenAI attributes
179
+ span .set_attribute ('gen_ai.system' , 'gcp.vertex_ai ' )
173
180
span .set_attribute ('gen_ai.request.model' , llm_request .model )
181
+
182
+ if hasattr (llm_response , 'id' ) and llm_response .id :
183
+ span .set_attribute ('gen_ai.response.id' , llm_response .id )
184
+
185
+ # Set response model if different from request model
186
+ if (
187
+ hasattr (llm_response , 'model' )
188
+ and llm_response .model
189
+ and llm_response .model != llm_request .model
190
+ ):
191
+ span .set_attribute ('gen_ai.response.model' , llm_response .model )
192
+
174
193
span .set_attribute (
175
194
'gcp.vertex.agent.invocation_id' , invocation_context .invocation_id
176
195
)
177
196
span .set_attribute (
178
197
'gcp.vertex.agent.session_id' , invocation_context .session .id
179
198
)
180
199
span .set_attribute ('gcp.vertex.agent.event_id' , event_id )
200
+
181
201
# Consider removing once GenAI SDK provides a way to record this info.
182
202
span .set_attribute (
183
203
'gcp.vertex.agent.llm_request' ,
184
204
_safe_json_serialize (_build_llm_request_for_trace (llm_request )),
185
205
)
186
- # Consider removing once GenAI SDK provides a way to record this info.
206
+
207
+ # Standard GenAI request attributes
187
208
if llm_request .config :
188
209
if llm_request .config .top_p :
189
210
span .set_attribute (
@@ -195,6 +216,14 @@ def trace_call_llm(
195
216
'gen_ai.request.max_tokens' ,
196
217
llm_request .config .max_output_tokens ,
197
218
)
219
+ if (
220
+ hasattr (llm_request .config , 'temperature' )
221
+ and llm_request .config .temperature is not None
222
+ ):
223
+ span .set_attribute (
224
+ 'gen_ai.request.temperature' ,
225
+ llm_request .config .temperature ,
226
+ )
198
227
199
228
try :
200
229
llm_response_json = llm_response .model_dump_json (exclude_none = True )
@@ -206,6 +235,7 @@ def trace_call_llm(
206
235
llm_response_json ,
207
236
)
208
237
238
+ # Standard GenAI usage and response attributes
209
239
if llm_response .usage_metadata is not None :
210
240
span .set_attribute (
211
241
'gen_ai.usage.input_tokens' ,
@@ -286,3 +316,41 @@ def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]:
286
316
)
287
317
)
288
318
return result
319
+
320
+
321
+ def _create_span_name (operation_name : str , model_name : str ) -> str :
322
+ """Creates a span name following OpenTelemetry GenAI conventions.
323
+
324
+ Args:
325
+ operation_name: The GenAI operation name (e.g., 'generate_content', 'execute_tool').
326
+ model_name: The model name being used.
327
+
328
+ Returns:
329
+ A span name in the format '{operation_name} {model_name}'.
330
+ """
331
+ return f'{ operation_name } { model_name } '
332
+
333
+
334
def add_genai_prompt_event(span: trace.Span, prompt_content: str):
  """Records the prompt on the span as a GenAI-convention event.

  Emits a 'gen_ai.content.prompt' event carrying the serialized prompt
  under the 'gen_ai.prompt' attribute.

  Args:
    span: The OpenTelemetry span to add the event to.
    prompt_content: The prompt content as a JSON string.
  """
  event_attributes = {'gen_ai.prompt': prompt_content}
  span.add_event(name='gen_ai.content.prompt', attributes=event_attributes)
344
+
345
+
346
def add_genai_completion_event(span: trace.Span, completion_content: str):
  """Records the completion on the span as a GenAI-convention event.

  Emits a 'gen_ai.content.completion' event carrying the serialized
  completion under the 'gen_ai.completion' attribute.

  Args:
    span: The OpenTelemetry span to add the event to.
    completion_content: The completion content as a JSON string.
  """
  event_attributes = {'gen_ai.completion': completion_content}
  span.add_event(
      name='gen_ai.content.completion',
      attributes=event_attributes,
  )
0 commit comments