mirror of https://github.com/open-telemetry/opentelemetry-python-contrib.git
synced 2025-08-01 17:34:38 +08:00

OpenAI instrumentation docs fixes (#2988)

* Add openai docs config and improve readme
* up
* Add manual sample, add no-content tests
* update headers
* lint
* use grpc endpoint in openai samples, add extra env vars to readme
* move distro fix to another PR
* nits
* Ignore examples for pylint
* Update .pylintrc
* ignore lint for example
* Fix README docs
* Update openai.rst
* Update conf.py
* Update docs-requirements.txt
* docs

---------

Co-authored-by: Leighton Chen <lechen@microsoft.com>
Co-authored-by: Riccardo Magliocchetti <riccardo.magliocchetti@gmail.com>
docs-requirements.txt

@@ -33,9 +33,11 @@ elasticsearch>=6.0,<9.0
 flask~=2.0
 falcon~=2.0
 grpcio~=1.27
+httpx>=0.18.0
 kafka-python>=2.0,<3.0
 mysql-connector-python~=8.0
 mysqlclient~=2.1.1
+openai >= 1.26.0
 psutil>=5
 psycopg~=3.1.17
 pika>=0.12.0
@@ -47,7 +49,6 @@ remoulade>=0.50
 sqlalchemy>=1.0
 tornado>=5.1.1
 tortoise-orm>=0.17.0
-httpx>=0.18.0
 
 # indirect dependency pins
 markupsafe==2.0.1
docs/conf.py (16 changed lines)

@@ -40,6 +40,13 @@ instr_dirs = [
     if isdir(join(instr, f))
 ]
 
+instr_genai = "../instrumentation-genai"
+instr_genai_dirs = [
+    os.path.abspath("/".join(["../instrumentation-genai", f, "src"]))
+    for f in listdir(instr_genai)
+    if isdir(join(instr_genai, f))
+]
+
 prop = "../propagator"
 prop_dirs = [
     os.path.abspath("/".join([prop, f, "src"]))
@@ -60,7 +67,14 @@ resource_dirs = [
     for f in listdir(resource)
     if isdir(join(resource, f))
 ]
-sys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs + prop_dirs + resource_dirs
+sys.path[:0] = (
+    exp_dirs
+    + instr_dirs
+    + instr_genai_dirs
+    + sdk_ext_dirs
+    + prop_dirs
+    + resource_dirs
+)
 
 # -- Project information -----------------------------------------------------
 
docs/index.rst

@@ -50,6 +50,7 @@ install <https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs>
     cd opentelemetry-python-contrib
     pip install -e ./instrumentation/opentelemetry-instrumentation-flask
     pip install -e ./instrumentation/opentelemetry-instrumentation-botocore
+    pip install -e ./instrumentation-genai/opentelemetry-instrumentation-openai-v2
     pip install -e ./sdk-extension/opentelemetry-sdk-extension-aws
     pip install -e ./resource/opentelemetry-resource-detector-container
 
@@ -62,6 +63,14 @@ install <https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs>
 
    instrumentation/**
 
+.. toctree::
+   :maxdepth: 2
+   :caption: OpenTelemetry Generative AI Instrumentations
+   :name: Generative AI Instrumentations
+   :glob:
+
+   instrumentation-genai/**
+
 .. toctree::
    :maxdepth: 2
    :caption: OpenTelemetry Propagators
docs/instrumentation-genai/openai.rst (new file, 7 lines)

@@ -0,0 +1,7 @@
+OpenTelemetry Python - OpenAI Instrumentation
+=============================================
+
+.. automodule:: opentelemetry.instrumentation.openai_v2
+    :members:
+    :undoc-members:
+    :show-inheritance:
docs/nitpick-exceptions.ini

@@ -24,6 +24,7 @@ py-class=
     httpx.Client
     httpx.AsyncClient
     httpx.BaseTransport
+    openai.BaseTransport
     httpx.AsyncBaseTransport
     httpx.SyncByteStream
     httpx.AsyncByteStream
instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst

@@ -19,8 +19,60 @@ package to your requirements.
 
     pip install opentelemetry-instrumentation-openai-v2
 
-If you don't have an OpenAI application, yet, try our `example <example>`_
-which only needs a valid OpenAI API key.
+If you don't have an OpenAI application yet, try our `examples <examples>`_,
+which only need a valid OpenAI API key.
+
+Check out the `zero-code example <examples/zero-code>`_ for a quick start.
+
+Usage
+-----
+
+This section describes how to set up OpenAI instrumentation if you're setting up OpenTelemetry manually.
+Check out the `manual example <examples/manual>`_ for more details.
+
+Instrumenting all clients
+*************************
+
+When using the instrumentor, all clients will automatically trace OpenAI chat completion operations.
+You can also optionally capture prompts and completions as log events.
+
+Make sure to configure OpenTelemetry tracing, logging, and events to capture all telemetry emitted by the instrumentation.
+
+.. code-block:: python

+    from openai import OpenAI
+
+    from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+    OpenAIInstrumentor().instrument()
+
+    client = OpenAI()
+    response = client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {"role": "user", "content": "Write a short poem on open telemetry."},
+        ],
+    )
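The snippet above assumes tracing, logging, and events are already configured. A minimal sketch of that setup using console exporters (the manual example's main.py later in this diff shows the OTLP variant; the `_logs`/`_events` APIs are still in beta and may change):

.. code-block:: python

    from opentelemetry import _events, _logs, trace
    from opentelemetry.sdk._events import EventLoggerProvider
    from opentelemetry.sdk._logs import LoggerProvider
    from opentelemetry.sdk._logs.export import (
        ConsoleLogExporter,
        SimpleLogRecordProcessor,
    )
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        ConsoleSpanExporter,
        SimpleSpanProcessor,
    )

    # Spans print to stdout; swap in OTLP exporters for a real backend.
    trace.set_tracer_provider(TracerProvider())
    trace.get_tracer_provider().add_span_processor(
        SimpleSpanProcessor(ConsoleSpanExporter())
    )

    # Log events (prompts and completions) also print to stdout.
    _logs.set_logger_provider(LoggerProvider())
    _logs.get_logger_provider().add_log_record_processor(
        SimpleLogRecordProcessor(ConsoleLogExporter())
    )
    _events.set_event_logger_provider(EventLoggerProvider())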
+
+Enabling message content
+************************
+
+Message content, such as the contents of the prompt, completion, function arguments, and return
+values, is not captured by default. To capture message content as log events, set the environment
+variable `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to `true`.
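For example, a minimal sketch of opting in from code before instrumenting, the same pattern this PR's test fixtures use to opt out (in a real deployment you would typically set the variable in the process environment instead):

.. code-block:: python

    import os

    from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

    # Opt in to capturing prompt and completion content as log events;
    # set this before calling instrument().
    os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"

    OpenAIInstrumentor().instrument()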
+
+Uninstrument
+************
+
+To uninstrument clients, call the uninstrument method:
+
+.. code-block:: python
+
+    from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+    OpenAIInstrumentor().instrument()
+    # ...
+
+    # Uninstrument all clients
+    OpenAIInstrumentor().uninstrument()
+
 References
 ----------
 
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/.env (new file)

@@ -0,0 +1,16 @@
+# Update this with your real OpenAI API key
+OPENAI_API_KEY=sk-YOUR_API_KEY
+
+# Uncomment to use Ollama instead of OpenAI
+# OPENAI_BASE_URL=http://localhost:11434/v1
+# OPENAI_API_KEY=unused
+# CHAT_MODEL=qwen2.5:0.5b
+
+# Uncomment and change to your OTLP endpoint
+# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
+
+OTEL_SERVICE_NAME=opentelemetry-python-openai
+
+# Change to 'false' to hide prompt and completion content
+OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/README.rst

@@ -1,8 +1,7 @@
 OpenTelemetry OpenAI Instrumentation Example
 ============================================
 
-This is an example of how to instrument OpenAI calls with zero code changes,
-using `opentelemetry-instrument`.
+This is an example of how to instrument OpenAI calls when configuring the OpenTelemetry SDK and instrumentations manually.
 
 When `main.py <main.py>`_ is run, it exports traces and logs to an OTLP
 compatible endpoint. Traces include details such as the model used and the
@@ -10,12 +9,18 @@ duration of the chat request. Logs capture the chat request and the generated
 response, providing a comprehensive view of the performance and behavior of
 your OpenAI requests.
 
+Note: the `.env <.env>`_ file configures additional environment variables:
+
+- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` configures
+  OpenAI instrumentation to capture prompt and completion contents on
+  events.
+
 Setup
 -----
 
 Minimally, update the `.env <.env>`_ file with your "OPENAI_API_KEY". An
 OTLP compatible endpoint should be listening for traces and logs on
-http://localhost:4318. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
+http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
 
 Next, set up a virtual environment like this:
 
@@ -33,7 +38,7 @@ Run the example like this:
 
 ::
 
-    dotenv run -- opentelemetry-instrument python main.py
+    dotenv run -- python main.py
 
 You should see a poem generated by OpenAI while traces and logs export to your
 configured observability tool.
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/main.py (new file, 53 lines)

@@ -0,0 +1,53 @@
+# pylint: skip-file
+import os
+
+from openai import OpenAI
+
+# NOTE: OpenTelemetry Python Logs and Events APIs are in beta
+from opentelemetry import _events, _logs, trace
+from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
+    OTLPLogExporter,
+)
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
+    OTLPSpanExporter,
+)
+from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+from opentelemetry.sdk._events import EventLoggerProvider
+from opentelemetry.sdk._logs import LoggerProvider
+from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+# configure tracing
+trace.set_tracer_provider(TracerProvider())
+trace.get_tracer_provider().add_span_processor(
+    BatchSpanProcessor(OTLPSpanExporter())
+)
+
+# configure logging and events
+_logs.set_logger_provider(LoggerProvider())
+_logs.get_logger_provider().add_log_record_processor(
+    BatchLogRecordProcessor(OTLPLogExporter())
+)
+_events.set_event_logger_provider(EventLoggerProvider())
+
+# instrument OpenAI
+OpenAIInstrumentor().instrument()
+
+
+def main():
+    client = OpenAI()
+    chat_completion = client.chat.completions.create(
+        model=os.getenv("CHAT_MODEL", "gpt-4o-mini"),
+        messages=[
+            {
+                "role": "user",
+                "content": "Write a short poem on OpenTelemetry.",
+            },
+        ],
+    )
+    print(chat_completion.choices[0].message.content)
+
+
+if __name__ == "__main__":
+    main()
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/requirements.txt (new file)

@@ -0,0 +1,5 @@
+openai~=1.54.4
+
+opentelemetry-sdk~=1.28.2
+opentelemetry-exporter-otlp-proto-grpc~=1.28.2
+opentelemetry-instrumentation-openai-v2~=2.0b0
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/.env

@@ -6,13 +6,16 @@ OPENAI_API_KEY=sk-YOUR_API_KEY
 # OPENAI_API_KEY=unused
 # CHAT_MODEL=qwen2.5:0.5b
 
-OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
-OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
+# Uncomment and change to your OTLP endpoint
+# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
 
 OTEL_SERVICE_NAME=opentelemetry-python-openai
 
 # Change to 'false' to disable logging
 OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
-# Change to 'console' if your OTLP endpoint doesn't support logs
-OTEL_LOGS_EXPORTER=otlp_proto_http
+# TODO: this should not be necessary once https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3042 is released
+OTEL_LOGS_EXPORTER=otlp
 # Change to 'false' to hide prompt and completion content
 OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/README.rst (new file, 48 lines)

@@ -0,0 +1,48 @@
+OpenTelemetry OpenAI Zero-Code Instrumentation Example
+======================================================
+
+This is an example of how to instrument OpenAI calls with zero code changes,
+using `opentelemetry-instrument`.
+
+When `main.py <main.py>`_ is run, it exports traces and logs to an OTLP
+compatible endpoint. Traces include details such as the model used and the
+duration of the chat request. Logs capture the chat request and the generated
+response, providing a comprehensive view of the performance and behavior of
+your OpenAI requests.
+
+Note: the `.env <.env>`_ file configures additional environment variables:
+
+- `OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true` configures the
+  OpenTelemetry SDK to export logs and events.
+- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` configures
+  OpenAI instrumentation to capture prompt and completion contents on
+  events.
+- `OTEL_LOGS_EXPORTER=otlp` specifies the exporter type.
+
+Setup
+-----
+
+Minimally, update the `.env <.env>`_ file with your "OPENAI_API_KEY". An
+OTLP compatible endpoint should be listening for traces and logs on
+http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
+
+Next, set up a virtual environment like this:
+
+::
+
+    python3 -m venv .venv
+    source .venv/bin/activate
+    pip install "python-dotenv[cli]"
+    pip install -r requirements.txt
+
+Run
+---
+
+Run the example like this:
+
+::
+
+    dotenv run -- opentelemetry-instrument python main.py
+
+You should see a poem generated by OpenAI while traces and logs export to your
+configured observability tool.
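Note that with zero-code instrumentation the application itself needs no OpenTelemetry imports; `opentelemetry-instrument` wires up the SDK and the OpenAI instrumentation before the script runs. A hypothetical minimal main.py in that spirit (the CHAT_MODEL fallback mirrors the `.env` convention above; the example's actual main.py is not shown in this diff):

.. code-block:: python

    import os

    from openai import OpenAI

    # No OpenTelemetry code here: opentelemetry-instrument configures
    # the SDK and the OpenAI instrumentation before this script runs.
    client = OpenAI()
    chat_completion = client.chat.completions.create(
        model=os.getenv("CHAT_MODEL", "gpt-4o-mini"),
        messages=[
            {"role": "user", "content": "Write a short poem on OpenTelemetry."},
        ],
    )
    print(chat_completion.choices[0].message.content)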
instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/requirements.txt

@@ -1,6 +1,6 @@
 openai~=1.54.4
 
 opentelemetry-sdk~=1.28.2
-opentelemetry-exporter-otlp-proto-http~=1.28.2
+opentelemetry-exporter-otlp-proto-grpc~=1.28.2
 opentelemetry-distro~=0.49b2
 opentelemetry-instrumentation-openai-v2~=2.0b0
instrumentation-genai/opentelemetry-instrumentation-openai-v2/pyproject.toml

@@ -39,7 +39,7 @@ instruments = [
 openai = "opentelemetry.instrumentation.openai_v2:OpenAIInstrumentor"
 
 [project.urls]
-Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-openai-v2"
+Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-openai-v2"
 
 [tool.hatch.version]
 path = "src/opentelemetry/instrumentation/openai_v2/version.py"
instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements.txt

@@ -8,6 +8,7 @@ pytest==7.4.4
 pytest-vcr==1.0.2
 pytest-asyncio==0.21.0
 wrapt==1.16.0
+opentelemetry-exporter-otlp-proto-http~=1.28
 opentelemetry-api==1.28 # when updating, also update in pyproject.toml
 opentelemetry-sdk==1.28 # when updating, also update in pyproject.toml
 opentelemetry-semantic-conventions==0.49b0 # when updating, also update in pyproject.toml
instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_no_content.yaml (new file)

@@ -0,0 +1,132 @@
+interactions:
+- request:
+    body: |-
+      {
+        "messages": [
+          {
+            "role": "user",
+            "content": "Say this is a test"
+          }
+        ],
+        "model": "gpt-4o-mini",
+        "stream": false
+      }
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      authorization:
+      - Bearer test_openai_api_key
+      connection:
+      - keep-alive
+      content-length:
+      - '106'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - AsyncOpenAI/Python 1.26.0
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - async:asyncio
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.26.0
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.5
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: |-
+        {
+          "id": "chatcmpl-ASv9R2E7Yhb2e7bj4Xl0qm9s3J42Y",
+          "object": "chat.completion",
+          "created": 1731456237,
+          "model": "gpt-4o-mini-2024-07-18",
+          "choices": [
+            {
+              "index": 0,
+              "message": {
+                "role": "assistant",
+                "content": "This is a test. How can I assist you further?",
+                "refusal": null
+              },
+              "logprobs": null,
+              "finish_reason": "stop"
+            }
+          ],
+          "usage": {
+            "prompt_tokens": 12,
+            "completion_tokens": 12,
+            "total_tokens": 24,
+            "prompt_tokens_details": {
+              "cached_tokens": 0,
+              "audio_tokens": 0
+            },
+            "completion_tokens_details": {
+              "reasoning_tokens": 0,
+              "audio_tokens": 0,
+              "accepted_prediction_tokens": 0,
+              "rejected_prediction_tokens": 0
+            }
+          },
+          "system_fingerprint": "fp_0ba0d124f1"
+        }
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8e1a80679a8311a6-MRS
+      Connection:
+      - keep-alive
+      Content-Type:
+      - application/json
+      Date:
+      - Wed, 13 Nov 2024 00:03:58 GMT
+      Server:
+      - cloudflare
+      Set-Cookie: test_set_cookie
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      content-length:
+      - '796'
+      openai-organization: test_openai_org_id
+      openai-processing-ms:
+      - '359'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '30000'
+      x-ratelimit-limit-tokens:
+      - '150000000'
+      x-ratelimit-remaining-requests:
+      - '29999'
+      x-ratelimit-remaining-tokens:
+      - '149999978'
+      x-ratelimit-reset-requests:
+      - 2ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_41ea134c1fc450d4ca4cf8d0c6a7c53a
+    status:
+      code: 200
+      message: OK
+version: 1
instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_no_content.yaml (new file)

@@ -0,0 +1,134 @@
+interactions:
+- request:
+    body: |-
+      {
+        "messages": [
+          {
+            "role": "user",
+            "content": "Say this is a test"
+          }
+        ],
+        "model": "gpt-4o-mini",
+        "stream": false
+      }
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      authorization:
+      - Bearer test_openai_api_key
+      connection:
+      - keep-alive
+      content-length:
+      - '106'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.54.3
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.54.3
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.6
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: |-
+        {
+          "id": "chatcmpl-ASYMQRl3A3DXL9FWCK9tnGRcKIO7q",
+          "object": "chat.completion",
+          "created": 1731368630,
+          "model": "gpt-4o-mini-2024-07-18",
+          "choices": [
+            {
+              "index": 0,
+              "message": {
+                "role": "assistant",
+                "content": "This is a test.",
+                "refusal": null
+              },
+              "logprobs": null,
+              "finish_reason": "stop"
+            }
+          ],
+          "usage": {
+            "prompt_tokens": 12,
+            "completion_tokens": 5,
+            "total_tokens": 17,
+            "prompt_tokens_details": {
+              "cached_tokens": 0,
+              "audio_tokens": 0
+            },
+            "completion_tokens_details": {
+              "reasoning_tokens": 0,
+              "audio_tokens": 0,
+              "accepted_prediction_tokens": 0,
+              "rejected_prediction_tokens": 0
+            }
+          },
+          "system_fingerprint": "fp_0ba0d124f1"
+        }
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8e122593ff368bc8-SIN
+      Connection:
+      - keep-alive
+      Content-Type:
+      - application/json
+      Date:
+      - Mon, 11 Nov 2024 23:43:50 GMT
+      Server:
+      - cloudflare
+      Set-Cookie: test_set_cookie
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      content-length:
+      - '765'
+      openai-organization: test_openai_org_id
+      openai-processing-ms:
+      - '287'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '200000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '199977'
+      x-ratelimit-reset-requests:
+      - 8.64s
+      x-ratelimit-reset-tokens:
+      - 6ms
+      x-request-id:
+      - req_58cff97afd0e7c0bba910ccf0b044a6f
+    status:
+      code: 200
+      message: OK
+version: 1
instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py

@@ -84,6 +84,10 @@ def vcr_config():
 
 @pytest.fixture(scope="function")
 def instrument_no_content(tracer_provider, event_logger_provider):
+    os.environ.update(
+        {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "False"}
+    )
+
     instrumentor = OpenAIInstrumentor()
     instrumentor.instrument(
         tracer_provider=tracer_provider,
@@ -91,6 +95,7 @@ def instrument_no_content(tracer_provider, event_logger_provider):
     )
 
     yield instrumentor
+    os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None)
     instrumentor.uninstrument()
 
 
instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py

@@ -68,6 +68,34 @@ async def test_async_chat_completion_with_content(
     assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
 
 
+@pytest.mark.vcr()
+@pytest.mark.asyncio()
+async def test_async_chat_completion_no_content(
+    span_exporter, log_exporter, async_openai_client, instrument_no_content
+):
+    llm_model_value = "gpt-4o-mini"
+    messages_value = [{"role": "user", "content": "Say this is a test"}]
+
+    response = await async_openai_client.chat.completions.create(
+        messages=messages_value, model=llm_model_value, stream=False
+    )
+
+    spans = span_exporter.get_finished_spans()
+    assert_completion_attributes(spans[0], llm_model_value, response)
+
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 2
+
+    assert_message_in_logs(logs[0], "gen_ai.user.message", None, spans[0])
+
+    choice_event = {
+        "index": 0,
+        "finish_reason": "stop",
+        "message": {"role": "assistant"},
+    }
+    assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
+
+
 @pytest.mark.asyncio()
 async def test_async_chat_completion_bad_endpoint(
     span_exporter, instrument_no_content
instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py

@@ -67,6 +67,33 @@ def test_chat_completion_with_content(
     assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
 
 
+@pytest.mark.vcr()
+def test_chat_completion_no_content(
+    span_exporter, log_exporter, openai_client, instrument_no_content
+):
+    llm_model_value = "gpt-4o-mini"
+    messages_value = [{"role": "user", "content": "Say this is a test"}]
+
+    response = openai_client.chat.completions.create(
+        messages=messages_value, model=llm_model_value, stream=False
+    )
+
+    spans = span_exporter.get_finished_spans()
+    assert_completion_attributes(spans[0], llm_model_value, response)
+
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 2
+
+    assert_message_in_logs(logs[0], "gen_ai.user.message", None, spans[0])
+
+    choice_event = {
+        "index": 0,
+        "finish_reason": "stop",
+        "message": {"role": "assistant"},
+    }
+    assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
+
+
 def test_chat_completion_bad_endpoint(span_exporter, instrument_no_content):
     llm_model_value = "gpt-4o-mini"
     messages_value = [{"role": "user", "content": "Say this is a test"}]