Mirror of https://github.com/open-telemetry/opentelemetry-python-contrib.git, synced 2025-07-31 22:23:12 +08:00

* Fix install of Python 3.10 on GitHub Actions

  In PR #1604 the Python version was upgraded to Python 3.10 to fix a local issue on M1 MacBooks. The GitHub Actions workflows now exit with the following message for the docker-tests, spellcheck and lint checks, skipping these checks:

  ```
  lint create: /home/runner/work/opentelemetry-python-contrib/opentelemetry-python-contrib/.tox/lint
  SKIPPED: InterpreterNotFound: python3.10
  ___________________________________ summary ____________________________________
  SKIPPED: lint: InterpreterNotFound: python3.10
  congratulations :)
  ```

  Upgrade the Python version in the GitHub Actions workflow to fix this.

* Fix YAML interpretation of Python 3.10

* Upgrade Docker tests dependencies

  Upgrade the asyncpg and psycopg2 packages; they don't work on Python 3.10. This also fixes running these tests on M1 MacBooks.

* Fix linter issues merged into main

  They went unnoticed while the CI didn't fail on the lint task not working.

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
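The "Fix YAML interpretation of Python 3.10" item above refers to a well-known pitfall: YAML reads the unquoted scalar `3.10` as the float `3.1`, so a workflow asking for Python `3.10` without quotes actually asks for `3.1`. A minimal sketch of the pitfall using PyYAML; the snippet is illustrative and not part of this repository:

```python
import yaml  # PyYAML; pip install pyyaml

# Unquoted, YAML reads 3.10 as a float and the trailing zero is lost.
print(yaml.safe_load("python-version: 3.10"))    # {'python-version': 3.1}

# Quoted, it stays the string "3.10".
print(yaml.safe_load('python-version: "3.10"'))  # {'python-version': '3.10'}
```

Quoting the version in the workflow file is the usual fix for the `InterpreterNotFound: python3.10` skip quoted above.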
115 lines · 3.1 KiB · Python
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import random
import sys
import time
from logging import INFO

import psutil

from opentelemetry import metrics
from opentelemetry.exporter.prometheus_remote_write import (
    PrometheusRemoteWriteMetricsExporter,
)
from opentelemetry.metrics import Observation
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)


# Static labels attached to every measurement recorded by this sample
testing_labels = {"environment": "testing"}

# Push metrics to Cortex over the Prometheus remote write protocol
exporter = PrometheusRemoteWriteMetricsExporter(
    endpoint="http://cortex:9009/api/prom/push",
    headers={"X-Scope-Org-ID": "5"},
)
# Collect and export every 1000 ms
reader = PeriodicExportingMetricReader(exporter, 1000)
provider = MeterProvider(metric_readers=[reader])
metrics.set_meter_provider(provider)
meter = metrics.get_meter(__name__)


# Callback to gather cpu usage
def get_cpu_usage_callback(observer):
    for number, percent in enumerate(psutil.cpu_percent(percpu=True)):
        labels = {"cpu_number": str(number)}
        yield Observation(percent, labels)


# Callback to gather RAM usage
def get_ram_usage_callback(observer):
    ram_percent = psutil.virtual_memory().percent
    yield Observation(ram_percent, {})


# Synchronous instruments; values are recorded by the load generator below
requests_counter = meter.create_counter(
    name="requests",
    description="number of requests",
    unit="1",
)

request_min_max = meter.create_counter(
    name="requests_min_max",
    description="min max sum count of requests",
    unit="1",
)

request_last_value = meter.create_counter(
    name="requests_last_value",
    description="last value number of requests",
    unit="1",
)

requests_active = meter.create_up_down_counter(
    name="requests_active",
    description="number of active requests",
    unit="1",
)

# Asynchronous instruments; values come from the psutil callbacks above
meter.create_observable_counter(
    callbacks=[get_ram_usage_callback],
    name="ram_usage",
    description="ram usage",
    unit="1",
)

meter.create_observable_up_down_counter(
    callbacks=[get_cpu_usage_callback],
    name="cpu_percent",
    description="per-cpu usage",
    unit="1",
)

request_latency = meter.create_histogram("request_latency")

# Load generator
num = random.randint(0, 1000)
while True:
    # counters
    requests_counter.add(num % 131 + 200, testing_labels)
    request_min_max.add(num % 181 + 200, testing_labels)
    request_last_value.add(num % 101 + 200, testing_labels)

    # updown counter
    requests_active.add(num % 7231 + 200, testing_labels)

    request_latency.record(num % 92, testing_labels)
    logger.log(level=INFO, msg="completed metrics collection cycle")
    time.sleep(1)
    num += 9791
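A note on running this sample: the `cortex:9009` endpoint resolves only on a Docker network that has a Cortex service under that name, so the app is meant to run alongside one. Any backend that accepts Prometheus remote write can stand in; a minimal sketch, assuming a local Prometheus launched with `--web.enable-remote-write-receiver` (the endpoint below is illustrative, not part of the original sample):

```python
# Hypothetical alternative backend: a local Prometheus with the
# remote-write receiver enabled via --web.enable-remote-write-receiver
exporter = PrometheusRemoteWriteMetricsExporter(
    endpoint="http://localhost:9090/api/v1/write",
)
```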