mirror of https://github.com/open-telemetry/opentelemetry-python-contrib.git, synced 2025-07-31 14:11:50 +08:00
Ensure that the Google GenAI SDK instrumentation correctly populates "finish_reasons" on the span. (#3417)
* Fix bug where 'gen_ai.response.finish_reasons' was not being correctly populated.
* Update changelog.
* Reformat with ruff.
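For context, the normalization at the heart of the fix can be sketched in isolation. The helper name `normalize_finish_reason` below is illustrative only; the enum handling mirrors the patched `_update_finish_reasons` method shown in the diff further down:

    from google.genai import types as genai_types

    def normalize_finish_reason(finish_reason: genai_types.FinishReason) -> str:
        # Enum member names such as "STOP" or "FINISH_REASON_UNSPECIFIED" are
        # lowercased, and the redundant "finish_reason_" prefix is stripped,
        # yielding "stop" and "unspecified" respectively.
        return finish_reason.name.lower().removeprefix("finish_reason_")

    assert normalize_finish_reason(genai_types.FinishReason.STOP) == "stop"
    assert (
        normalize_finish_reason(
            genai_types.FinishReason.FINISH_REASON_UNSPECIFIED
        )
        == "unspecified"
    )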
@@ -9,6 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Restructure tests to keep in line with repository conventions ([#3344](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3344))
- Fix [bug](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3416) where
  span attribute `gen_ai.response.finish_reasons` is empty ([#3417](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3417))

## Version 0.1b0 (2025-03-05)

- Add support for async and streaming.

@@ -252,6 +252,7 @@ class _GenerateContentInstrumentationHelper:
        # need to be reflected back into the span attributes.
        #
        # See also: TODOS.md.
        self._update_finish_reasons(response)
        self._maybe_update_token_counts(response)
        self._maybe_update_error_type(response)
        self._maybe_log_response(response)
@@ -275,6 +276,18 @@ class _GenerateContentInstrumentationHelper:
        self._record_token_usage_metric()
        self._record_duration_metric()

    def _update_finish_reasons(self, response):
        if not response.candidates:
            return
        for candidate in response.candidates:
            finish_reason = candidate.finish_reason
            if finish_reason is None:
                continue
            finish_reason_str = finish_reason.name.lower().removeprefix(
                "finish_reason_"
            )
            self._finish_reasons_set.add(finish_reason_str)

    def _maybe_update_token_counts(self, response: GenerateContentResponse):
        input_tokens = _get_response_property(
            response, "usage_metadata.prompt_token_count"
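Note that `_update_finish_reasons` only accumulates normalized values into `self._finish_reasons_set`. The sorted, deduplicated lists asserted in the tests below are consistent with the span attribute being derived from that set along these lines (a sketch of the expected behavior, not the instrumentation's exact attribute-writing code):

    # A set deduplicates repeated reasons across candidates; sorting makes
    # the resulting span attribute value deterministic.
    finish_reasons_set = {"stop", "max_tokens", "safety"}
    span_value = sorted(finish_reasons_set)
    assert span_value == ["max_tokens", "safety", "stop"]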
@@ -0,0 +1,143 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from google.genai import types as genai_types

from .base import TestCase


class FinishReasonsTestCase(TestCase):
    def generate_and_get_span_finish_reasons(self):
        self.client.models.generate_content(
            model="gemini-2.5-flash-001", contents="Some prompt"
        )
        span = self.otel.get_span_named(
            "generate_content gemini-2.5-flash-001"
        )
        assert span is not None
        assert "gen_ai.response.finish_reasons" in span.attributes
        return list(span.attributes["gen_ai.response.finish_reasons"])

    def test_single_candidate_with_valid_reason(self):
        self.configure_valid_response(
            candidate=genai_types.Candidate(
                finish_reason=genai_types.FinishReason.STOP
            )
        )
        self.assertEqual(self.generate_and_get_span_finish_reasons(), ["stop"])

    def test_single_candidate_with_safety_reason(self):
        self.configure_valid_response(
            candidate=genai_types.Candidate(
                finish_reason=genai_types.FinishReason.SAFETY
            )
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["safety"]
        )

    def test_single_candidate_with_max_tokens_reason(self):
        self.configure_valid_response(
            candidate=genai_types.Candidate(
                finish_reason=genai_types.FinishReason.MAX_TOKENS
            )
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["max_tokens"]
        )

    def test_single_candidate_with_no_reason(self):
        self.configure_valid_response(
            candidate=genai_types.Candidate(finish_reason=None)
        )
        self.assertEqual(self.generate_and_get_span_finish_reasons(), [])

    def test_single_candidate_with_unspecified_reason(self):
        self.configure_valid_response(
            candidate=genai_types.Candidate(
                finish_reason=genai_types.FinishReason.FINISH_REASON_UNSPECIFIED
            )
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["unspecified"]
        )

    def test_multiple_candidates_with_valid_reasons(self):
        self.configure_valid_response(
            candidates=[
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.MAX_TOKENS
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
            ]
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["max_tokens", "stop"]
        )

    def test_sorts_finish_reasons(self):
        self.configure_valid_response(
            candidates=[
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.MAX_TOKENS
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.SAFETY
                ),
            ]
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(),
            ["max_tokens", "safety", "stop"],
        )

    def test_deduplicates_finish_reasons(self):
        self.configure_valid_response(
            candidates=[
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.MAX_TOKENS
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.SAFETY
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
                genai_types.Candidate(
                    finish_reason=genai_types.FinishReason.STOP
                ),
            ]
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(),
            ["max_tokens", "safety", "stop"],
        )
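
The `configure_valid_response` helper comes from the shared `TestCase` base class and is not part of this diff; conceptually it arranges for the mocked API call to return a response like the following (a sketch assuming the `google.genai` response model, with an illustrative variable name):

    # Hypothetical response payload equivalent to what the tests configure:
    # two candidates whose finish reasons end up as ["max_tokens", "stop"].
    fake_response = genai_types.GenerateContentResponse(
        candidates=[
            genai_types.Candidate(
                finish_reason=genai_types.FinishReason.STOP
            ),
            genai_types.Candidate(
                finish_reason=genai_types.FinishReason.MAX_TOKENS
            ),
        ]
    )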