Skip to content

Commit a57f7d9

Browse files
hkt74copybara-github
authored and committed
feat: enable responseId for Gemini Developer API
remove thinking tests in v1alpha; thinking is GA now.

PiperOrigin-RevId: 789512508
1 parent 8a45746 commit a57f7d9

10 files changed

+39
-127
lines changed

google/genai/batches.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1492,6 +1492,9 @@ def _GenerateContentResponse_from_mldev(
14921492
if getv(from_object, ['promptFeedback']) is not None:
14931493
setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
14941494

1495+
if getv(from_object, ['responseId']) is not None:
1496+
setv(to_object, ['response_id'], getv(from_object, ['responseId']))
1497+
14951498
if getv(from_object, ['usageMetadata']) is not None:
14961499
setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
14971500

google/genai/models.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3665,6 +3665,9 @@ def _GenerateContentResponse_from_mldev(
36653665
if getv(from_object, ['promptFeedback']) is not None:
36663666
setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
36673667

3668+
if getv(from_object, ['responseId']) is not None:
3669+
setv(to_object, ['response_id'], getv(from_object, ['responseId']))
3670+
36683671
if getv(from_object, ['usageMetadata']) is not None:
36693672
setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
36703673

@@ -4317,15 +4320,15 @@ def _GenerateContentResponse_from_vertex(
43174320
if getv(from_object, ['createTime']) is not None:
43184321
setv(to_object, ['create_time'], getv(from_object, ['createTime']))
43194322

4320-
if getv(from_object, ['responseId']) is not None:
4321-
setv(to_object, ['response_id'], getv(from_object, ['responseId']))
4322-
43234323
if getv(from_object, ['modelVersion']) is not None:
43244324
setv(to_object, ['model_version'], getv(from_object, ['modelVersion']))
43254325

43264326
if getv(from_object, ['promptFeedback']) is not None:
43274327
setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
43284328

4329+
if getv(from_object, ['responseId']) is not None:
4330+
setv(to_object, ['response_id'], getv(from_object, ['responseId']))
4331+
43294332
if getv(from_object, ['usageMetadata']) is not None:
43304333
setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
43314334

google/genai/tests/chats/test_send_message.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ def test_uploaded_file_uri(client):
147147
[
148148
'what is the image about?',
149149
types.Part.from_uri(
150-
file_uri='https://generativelanguage.googleapis.com/v1beta/files/9w04rxmcgsp8',
150+
file_uri='https://generativelanguage.googleapis.com/v1beta/files/az606f58k7zj',
151151
mime_type='image/png',
152152
),
153153
],

google/genai/tests/models/test_generate_content.py

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -612,10 +612,6 @@ async def test_simple_shared_generation_config_stream_async(client):
612612

613613

614614
def test_log_probs(client):
615-
# ML DEV discovery doc supports response_logprobs but the backend
616-
# does not.
617-
# TODO: update replay test json files when ML Dev backend is updated.
618-
with pytest_helper.exception_if_mldev(client, errors.ClientError):
619615
client.models.generate_content(
620616
model='gemini-1.5-flash',
621617
contents='What is your name?',
@@ -2316,8 +2312,4 @@ async def test_error_handling_stream_async(client):
23162312
continue
23172313

23182314
except errors.ClientError as e:
2319-
assert (
2320-
e.message
2321-
== 'Developer instruction is not enabled for'
2322-
' models/gemini-2.0-flash-exp-image-generation'
2323-
)
2315+
assert ('Developer instruction is not enabled' in e.message)

google/genai/tests/models/test_generate_content_config_zero_value.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -68,10 +68,6 @@
6868
'logprobs': 0,
6969
},
7070
),
71-
# ML DEV discovery doc supports response_logprobs but the backend
72-
# does not.
73-
# TODO: update replay test json files when ML Dev backend is updated.
74-
exception_if_mldev='INVALID_ARGUMENT',
7571
),
7672
pytest_helper.TestTableItem(
7773
name='test_presence_penalty_zero',

google/genai/tests/models/test_generate_content_model.py

Lines changed: 0 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -25,18 +25,8 @@
2525
'projects/964831358985/locations/us-central1/endpoints/7226683110069370880'
2626
)
2727

28-
tuned_model_with_model_name = 'tunedModels/generatenum5443-ekrw7ie9wis23zbeogbw6jq8'
2928

3029
test_table: list[pytest_helper.TestTableItem] = [
31-
pytest_helper.TestTableItem(
32-
name='test_tuned_model_with_model_name',
33-
parameters=types._GenerateContentParameters(
34-
model=tuned_model_with_model_name,
35-
contents=t.t_contents('how are you doing?'),
36-
),
37-
exception_if_vertex='404',
38-
skip_in_api_mode='It requires a specific ML Dev account.',
39-
),
4030
pytest_helper.TestTableItem(
4131
name='test_tuned_model',
4232
parameters=types._GenerateContentParameters(
@@ -102,18 +92,6 @@ def test_tuned_model_stream(client):
10292
assert chunks >= 2
10393

10494

105-
def test_tuned_model_with_model_name_stream(client):
106-
with pytest_helper.exception_if_vertex(client, errors.ClientError):
107-
chunks = 0
108-
for chunk in client.models.generate_content_stream(
109-
model=tuned_model_with_model_name,
110-
contents='Tell me a story in 300 words.',
111-
):
112-
chunks += 1
113-
assert chunk.text is not None or chunk.candidates[0].finish_reason
114-
assert chunks >= 1
115-
116-
11795
def test_start_with_models_stream(client):
11896
# vertex ai require publishers/ prefix for gemini
11997
with pytest_helper.exception_if_vertex(client, errors.ClientError):

google/genai/tests/models/test_generate_content_part.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@
123123
types.PartDict({
124124
'file_data': {
125125
'file_uri': (
126-
'https://generativelanguage.googleapis.com/v1beta/files/dez0g1rajz7a'
126+
'https://generativelanguage.googleapis.com/v1beta/files/az606f58k7zj'
127127
),
128128
'mime_type': 'image/png',
129129
}
@@ -151,7 +151,7 @@
151151
types.PartDict({
152152
'file_data': {
153153
'file_uri': (
154-
'https://generativelanguage.googleapis.com/v1beta/files/f1rtzshxniw4'
154+
'https://generativelanguage.googleapis.com/v1beta/files/wma71fsppgfp'
155155
),
156156
'mime_type': 'image/jpeg',
157157
}
@@ -181,7 +181,7 @@
181181
types.PartDict({
182182
'file_data': {
183183
'file_uri': (
184-
'https://generativelanguage.googleapis.com/v1beta/files/8c7hpi2zez57'
184+
'https://generativelanguage.googleapis.com/v1beta/files/r6ksskgddyxb'
185185
),
186186
'mime_type': 'application/pdf',
187187
}
@@ -214,7 +214,7 @@
214214
types.PartDict({
215215
'file_data': {
216216
'file_uri': (
217-
'https://generativelanguage.googleapis.com/v1beta/files/siotqjy5g6mw'
217+
'https://generativelanguage.googleapis.com/v1beta/files/57w3vpfomj71'
218218
),
219219
'mime_type': 'video/mp4',
220220
}
@@ -248,9 +248,9 @@
248248
types.PartDict({
249249
'file_data': {
250250
'file_uri': (
251-
'https://generativelanguage.googleapis.com/v1beta/files/j2mpcv8edrqu'
251+
'https://generativelanguage.googleapis.com/v1beta/files/wkvof7yeqitl'
252252
),
253-
'mime_type': 'audio/mp4',
253+
'mime_type': 'audio/mpeg',
254254
}
255255
})
256256
],
@@ -274,7 +274,7 @@
274274
types.Part(text='summarize this video'),
275275
types.Part(
276276
file_data=types.FileData(
277-
file_uri='https://generativelanguage.googleapis.com/v1beta/files/tyvaih24jwje',
277+
file_uri='https://generativelanguage.googleapis.com/v1beta/files/57w3vpfomj71',
278278
mime_type= 'video/mp4',
279279
),
280280
video_metadata=types.VideoMetadata(
@@ -672,7 +672,7 @@ def test_video_audio_uri(client):
672672
def test_file(client):
673673
with pytest_helper.exception_if_vertex(client, errors.ClientError):
674674
file = types.File(
675-
uri='https://generativelanguage.googleapis.com/v1beta/files/cmpqbqoptyaa',
675+
uri='https://generativelanguage.googleapis.com/v1beta/files/ly6p67c47xgq',
676676
mime_type='text/plain',
677677
)
678678
client.models.generate_content(

google/genai/tests/models/test_generate_content_thought.py

Lines changed: 10 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -22,36 +22,16 @@
2222

2323
test_table: list[pytest_helper.TestTableItem] = [
2424
pytest_helper.TestTableItem(
25-
name='test_generate_content_thought',
25+
name='test_disable_thinking',
2626
parameters=types._GenerateContentParameters(
27-
model='gemini-2.5-pro-preview-03-25',
27+
model='gemini-2.5-flash',
2828
contents=t.t_contents('Explain the monty hall problem.'),
2929
config={
3030
'thinking_config': {
31-
'include_thoughts': True,
32-
'thinking_budget': 10000},
31+
'thinking_budget': 0},
3332
},
3433
),
3534
),
36-
pytest_helper.TestTableItem(
37-
name='test_generate_content_thought_v1alpha',
38-
parameters=types._GenerateContentParameters(
39-
model='gemini-2.5-pro-preview-03-25',
40-
contents=t.t_contents(
41-
'What is the sum of natural numbers from 1 to 100?'
42-
),
43-
config={
44-
'thinking_config': {
45-
'include_thoughts': True,
46-
'thinking_budget': 10000
47-
},
48-
'http_options': {
49-
'api_version': 'v1alpha'
50-
},
51-
},
52-
),
53-
exception_if_vertex='404',
54-
),
5535
]
5636

5737

@@ -63,60 +43,22 @@
6343
)
6444

6545

66-
def test_thought_signature_with_thinking_budget(client):
67-
with pytest_helper.exception_if_vertex(client, errors.ClientError):
46+
def test_thinking_budget(client):
6847
response = client.models.generate_content(
69-
model='gemini-2.5-pro-preview-03-25',
48+
model='gemini-2.5-pro',
7049
contents='What is the sum of natural numbers from 1 to 100?',
7150
config={
7251
'thinking_config': {
7352
'include_thoughts': True,
7453
'thinking_budget': 10000,
7554
},
76-
'http_options': {'api_version': 'v1alpha'},
7755
},
7856
)
7957
has_thought = False
8058
if response.candidates:
81-
for candidate in response.candidates:
82-
for part in candidate.content.parts:
83-
if part.thought:
84-
has_thought = True
85-
break
59+
for candidate in response.candidates:
60+
for part in candidate.content.parts:
61+
if part.thought:
62+
has_thought = True
63+
break
8664
assert has_thought
87-
88-
89-
def test_thought_with_include_thoughts_v1alpha(client):
90-
# Thoughts have been disabled in the API.
91-
with pytest_helper.exception_if_vertex(client, errors.ClientError):
92-
response = client.models.generate_content(
93-
model='gemini-2.0-flash-thinking-exp',
94-
contents='What is the sum of natural numbers from 1 to 100?',
95-
config={
96-
'thinking_config': {'include_thoughts': True},
97-
'http_options': {'api_version': 'v1alpha'},
98-
},
99-
)
100-
has_thought = False
101-
if response.candidates:
102-
for candidate in response.candidates:
103-
for part in candidate.content.parts:
104-
if part.thought:
105-
has_thought = True
106-
break
107-
assert has_thought
108-
109-
110-
def test_no_thought_with_default_config(client):
111-
with pytest_helper.exception_if_vertex(client, errors.ClientError):
112-
response = client.models.generate_content(
113-
model='gemini-2.0-flash-thinking-exp',
114-
contents='What is the sum of natural numbers from 1 to 100?',
115-
)
116-
has_thought = False
117-
for candidate in response.candidates:
118-
for part in candidate.content.parts:
119-
if part.thought:
120-
has_thought = True
121-
break
122-
assert not has_thought

google/genai/tests/models/test_generate_content_tools.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -286,7 +286,7 @@ def divide_floats(a: float, b: float) -> float:
286286
pytest_helper.TestTableItem(
287287
name='test_url_context',
288288
parameters=types._GenerateContentParameters(
289-
model='gemini-2.5-flash-preview-04-17',
289+
model='gemini-2.5-flash',
290290
contents=t.t_contents(
291291
'what are the top headlines on https://news.google.com'
292292
),
@@ -444,7 +444,7 @@ def customized_divide_integers(numerator: int, denominator: int) -> int:
444444
return numerator // denominator + 1
445445

446446
response = client.models.generate_content(
447-
model='gemini-2.5-flash-preview-04-17',
447+
model='gemini-2.5-flash',
448448
contents='what is the result of 1000/2?',
449449
config={
450450
'tools': [customized_divide_integers],
@@ -843,7 +843,7 @@ def describe_cities(
843843

844844
response = client.models.generate_content(
845845
model='gemini-1.5-flash',
846-
contents='Can you describe the city of San Francisco?',
846+
contents='Can you describe the city of San Francisco, USA?',
847847
config={
848848
'tools': [describe_cities],
849849
'automatic_function_calling': {'ignore_call_history': True},

google/genai/types.py

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5124,11 +5124,6 @@ class GenerateContentResponse(_common.BaseModel):
51245124
description="""Timestamp when the request is made to the server.
51255125
""",
51265126
)
5127-
response_id: Optional[str] = Field(
5128-
default=None,
5129-
description="""Identifier for each response.
5130-
""",
5131-
)
51325127
model_version: Optional[str] = Field(
51335128
default=None,
51345129
description="""Output only. The model version used to generate the response.""",
@@ -5137,6 +5132,10 @@ class GenerateContentResponse(_common.BaseModel):
51375132
default=None,
51385133
description="""Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations.""",
51395134
)
5135+
response_id: Optional[str] = Field(
5136+
default=None,
5137+
description="""Output only. response_id is used to identify each response. It is the encoding of the event_id.""",
5138+
)
51405139
usage_metadata: Optional[GenerateContentResponseUsageMetadata] = Field(
51415140
default=None, description="""Usage metadata about the response(s)."""
51425141
)
@@ -5383,16 +5382,15 @@ class GenerateContentResponseDict(TypedDict, total=False):
53835382
"""Timestamp when the request is made to the server.
53845383
"""
53855384

5386-
response_id: Optional[str]
5387-
"""Identifier for each response.
5388-
"""
5389-
53905385
model_version: Optional[str]
53915386
"""Output only. The model version used to generate the response."""
53925387

53935388
prompt_feedback: Optional[GenerateContentResponsePromptFeedbackDict]
53945389
"""Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. Only happens when no candidates were generated due to content violations."""
53955390

5391+
response_id: Optional[str]
5392+
"""Output only. response_id is used to identify each response. It is the encoding of the event_id."""
5393+
53965394
usage_metadata: Optional[GenerateContentResponseUsageMetadataDict]
53975395
"""Usage metadata about the response(s)."""
53985396

0 commit comments

Comments
 (0)