build(deps): bump opentelemetry crates to 0.27 (#6774)

Sanchith Hegde
2024-12-10 16:35:34 +05:30
committed by GitHub
parent a52828296a
commit 47a3d2b2ab
86 changed files with 739 additions and 1086 deletions

View File

@@ -74,7 +74,7 @@ impl Handler {
         let jobs_picked = Arc::new(atomic::AtomicU8::new(0));
         while self.running.load(atomic::Ordering::SeqCst) {
-            metrics::DRAINER_HEALTH.add(&metrics::CONTEXT, 1, &[]);
+            metrics::DRAINER_HEALTH.add(1, &[]);
             for store in self.stores.values() {
                 if store.is_stream_available(stream_index).await {
                     let _task_handle = tokio::spawn(
@@ -103,7 +103,7 @@ impl Handler {
     pub(crate) async fn shutdown_listener(&self, mut rx: mpsc::Receiver<()>) {
         while let Some(_c) = rx.recv().await {
             logger::info!("Awaiting shutdown!");
-            metrics::SHUTDOWN_SIGNAL_RECEIVED.add(&metrics::CONTEXT, 1, &[]);
+            metrics::SHUTDOWN_SIGNAL_RECEIVED.add(1, &[]);
             let shutdown_started = time::Instant::now();
             rx.close();
@@ -112,9 +112,9 @@ impl Handler {
                 time::sleep(self.shutdown_interval).await;
             }
             logger::info!("Terminating drainer");
-            metrics::SUCCESSFUL_SHUTDOWN.add(&metrics::CONTEXT, 1, &[]);
+            metrics::SUCCESSFUL_SHUTDOWN.add(1, &[]);
             let shutdown_ended = shutdown_started.elapsed().as_secs_f64() * 1000f64;
-            metrics::CLEANUP_TIME.record(&metrics::CONTEXT, shutdown_ended, &[]);
+            metrics::CLEANUP_TIME.record(shutdown_ended, &[]);
             self.close();
         }
         logger::info!(
@@ -217,7 +217,7 @@ async fn drainer(
         if let redis_interface::errors::RedisError::StreamEmptyOrNotAvailable =
             redis_err.current_context()
         {
-            metrics::STREAM_EMPTY.add(&metrics::CONTEXT, 1, &[]);
+            metrics::STREAM_EMPTY.add(1, &[]);
             return Ok(());
         } else {
             return Err(error);
@@ -236,12 +236,8 @@ async fn drainer(
     let read_count = entries.len();
     metrics::JOBS_PICKED_PER_STREAM.add(
-        &metrics::CONTEXT,
         u64::try_from(read_count).unwrap_or(u64::MIN),
-        &[metrics::KeyValue {
-            key: "stream".into(),
-            value: stream_name.to_string().into(),
-        }],
+        router_env::metric_attributes!(("stream", stream_name.to_owned())),
     );

     let session_id = common_utils::generate_id_with_default_len("drainer_session");
@@ -254,12 +250,8 @@ async fn drainer(
         Err(err) => {
             logger::error!(operation = "deserialization", err=?err);
             metrics::STREAM_PARSE_FAIL.add(
-                &metrics::CONTEXT,
                 1,
-                &[metrics::KeyValue {
-                    key: "operation".into(),
-                    value: "deserialization".into(),
-                }],
+                router_env::metric_attributes!(("operation", "deserialization")),
             );
             // break from the loop in case of a deser error
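Note: the recurring change in this file tracks the opentelemetry 0.27 metrics API, where instrument methods no longer take an explicit `Context` as their first argument. A minimal sketch of the new calling convention (meter and instrument names here are illustrative, not the ones router_env defines):

    use opentelemetry::{global, KeyValue};

    fn record_job_picked(stream_name: &str) {
        // 0.27: instruments are created through builders finalized with `build()`.
        let meter = global::meter("drainer-example");
        let counter = meter.u64_counter("jobs_picked").build();

        // 0.27: `add` takes only the value and the attribute slice;
        // the old `&Context` first argument is gone.
        counter.add(1, &[KeyValue::new("stream", stream_name.to_owned())]);
    }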

View File

@@ -1,2 +1,2 @@
 #[doc(inline)]
-pub use router_env::*;
+pub use router_env::{debug, error, info, warn};

View File

@@ -1,8 +1,6 @@
 use std::collections::HashMap;

-use drainer::{
-    errors::DrainerResult, logger::logger, services, settings, start_drainer, start_web_server,
-};
+use drainer::{errors::DrainerResult, logger, services, settings, start_drainer, start_web_server};
 use router_env::tracing::Instrument;

 #[tokio::main]

View File

@@ -1,9 +1,5 @@
-pub use router_env::opentelemetry::KeyValue;
-use router_env::{
-    counter_metric, global_meter, histogram_metric, histogram_metric_i64, metrics_context,
-};
+use router_env::{counter_metric, global_meter, histogram_metric_f64, histogram_metric_u64};

-metrics_context!(CONTEXT);
 global_meter!(DRAINER_METER, "DRAINER");

 counter_metric!(JOBS_PICKED_PER_STREAM, DRAINER_METER);
@@ -17,8 +13,8 @@ counter_metric!(STREAM_EMPTY, DRAINER_METER);
 counter_metric!(STREAM_PARSE_FAIL, DRAINER_METER);
 counter_metric!(DRAINER_HEALTH, DRAINER_METER);

-histogram_metric!(QUERY_EXECUTION_TIME, DRAINER_METER); // Time in (ms) milliseconds
-histogram_metric!(REDIS_STREAM_READ_TIME, DRAINER_METER); // Time in (ms) milliseconds
-histogram_metric!(REDIS_STREAM_TRIM_TIME, DRAINER_METER); // Time in (ms) milliseconds
-histogram_metric!(CLEANUP_TIME, DRAINER_METER); // Time in (ms) milliseconds
-histogram_metric_i64!(DRAINER_DELAY_SECONDS, DRAINER_METER); // Time in (s) seconds
+histogram_metric_f64!(QUERY_EXECUTION_TIME, DRAINER_METER); // Time in (ms) milliseconds
+histogram_metric_f64!(REDIS_STREAM_READ_TIME, DRAINER_METER); // Time in (ms) milliseconds
+histogram_metric_f64!(REDIS_STREAM_TRIM_TIME, DRAINER_METER); // Time in (ms) milliseconds
+histogram_metric_f64!(CLEANUP_TIME, DRAINER_METER); // Time in (ms) milliseconds
+histogram_metric_u64!(DRAINER_DELAY_SECONDS, DRAINER_METER); // Time in (s) seconds
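Note: with the `Context`-free API there is no `metrics_context!` anymore, and histograms are now declared with explicitly typed macros. A rough sketch of what a `histogram_metric_f64!`-style macro could expand to (hypothetical; the actual router_env macro may differ):

    // Hypothetical expansion, assuming DRAINER_METER is a `LazyLock<Meter>`
    // produced by `global_meter!`.
    macro_rules! histogram_metric_f64 {
        ($name:ident, $meter:ident) => {
            pub static $name: std::sync::LazyLock<opentelemetry::metrics::Histogram<f64>> =
                std::sync::LazyLock::new(|| $meter.f64_histogram(stringify!($name)).build());
        };
    }

The `_u64` variant would be analogous, built on `Meter::u64_histogram`, which is why `DRAINER_DELAY_SECONDS` now needs a non-negative value (see the `u64::try_from` handling further down).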

View File

@@ -25,32 +25,23 @@ impl ExecuteQuery for kv::DBOperation {
         let operation = self.operation();
         let table = self.table();

-        let tags: &[metrics::KeyValue] = &[
-            metrics::KeyValue {
-                key: "operation".into(),
-                value: operation.into(),
-            },
-            metrics::KeyValue {
-                key: "table".into(),
-                value: table.into(),
-            },
-        ];
+        let tags = router_env::metric_attributes!(("operation", operation), ("table", table));

         let (result, execution_time) =
             Box::pin(common_utils::date_time::time_it(|| self.execute(&conn))).await;

         push_drainer_delay(pushed_at, operation, table, tags);
-        metrics::QUERY_EXECUTION_TIME.record(&metrics::CONTEXT, execution_time, tags);
+        metrics::QUERY_EXECUTION_TIME.record(execution_time, tags);

         match result {
             Ok(result) => {
                 logger::info!(operation = operation, table = table, ?result);
-                metrics::SUCCESSFUL_QUERY_EXECUTION.add(&metrics::CONTEXT, 1, tags);
+                metrics::SUCCESSFUL_QUERY_EXECUTION.add(1, tags);
                 Ok(())
             }
             Err(err) => {
                 logger::error!(operation = operation, table = table, ?err);
-                metrics::ERRORS_WHILE_QUERY_EXECUTION.add(&metrics::CONTEXT, 1, tags);
+                metrics::ERRORS_WHILE_QUERY_EXECUTION.add(1, tags);
                 Err(err)
             }
         }
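Note: `router_env::metric_attributes!` replaces the hand-rolled `KeyValue` slice above. A plausible minimal definition, shown only to illustrate the shape of the expansion (the real macro lives in router_env and may differ):

    // Illustrative sketch: turns ("key", value) pairs into a &[KeyValue].
    macro_rules! metric_attributes {
        ($(($key:expr, $value:expr)),+ $(,)?) => {
            &[$(opentelemetry::KeyValue::new($key, $value)),+]
        };
    }

so `metric_attributes!(("operation", operation), ("table", table))` yields a `&[KeyValue]` usable as the attribute argument of `add`/`record`.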
@@ -58,15 +49,25 @@ impl ExecuteQuery for kv::DBOperation {
 }

 #[inline(always)]
-fn push_drainer_delay(pushed_at: i64, operation: &str, table: &str, tags: &[metrics::KeyValue]) {
+fn push_drainer_delay(
+    pushed_at: i64,
+    operation: &str,
+    table: &str,
+    tags: &[router_env::opentelemetry::KeyValue],
+) {
     let drained_at = common_utils::date_time::now_unix_timestamp();
     let delay = drained_at - pushed_at;

-    logger::debug!(
-        operation = operation,
-        table = table,
-        delay = format!("{delay} secs")
-    );
+    logger::debug!(operation, table, delay = format!("{delay} secs"));

-    metrics::DRAINER_DELAY_SECONDS.record(&metrics::CONTEXT, delay, tags);
+    match u64::try_from(delay) {
+        Ok(delay) => metrics::DRAINER_DELAY_SECONDS.record(delay, tags),
+        Err(error) => logger::error!(
+            pushed_at,
+            drained_at,
+            delay,
+            ?error,
+            "Invalid drainer delay"
+        ),
+    }
 }
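Note: `DRAINER_DELAY_SECONDS` is now a `u64` histogram, so a negative delay (possible if the producer's clock runs ahead of the drainer's) can no longer be recorded silently; it is logged instead. For reference, the conversion the new code relies on:

    // i64 -> u64 conversion fails on negative input instead of wrapping.
    assert_eq!(u64::try_from(42_i64), Ok(42_u64));
    assert!(u64::try_from(-1_i64).is_err()); // previously recorded as a raw i64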

View File

@@ -69,9 +69,8 @@ impl Store {
             .await;

         metrics::REDIS_STREAM_READ_TIME.record(
-            &metrics::CONTEXT,
             execution_time,
-            &[metrics::KeyValue::new("stream", stream_name.to_owned())],
+            router_env::metric_attributes!(("stream", stream_name.to_owned())),
         );

         Ok(output?)
@@ -104,9 +103,8 @@ impl Store {
             .await;

         metrics::REDIS_STREAM_TRIM_TIME.record(
-            &metrics::CONTEXT,
             execution_time,
-            &[metrics::KeyValue::new("stream", stream_name.to_owned())],
+            router_env::metric_attributes!(("stream", stream_name.to_owned())),
        );

         // adding 1 because we are deleting the given id too
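Note: histogram `record` follows the same 0.27 convention as counter `add`. A rough sketch of the time-and-record pattern used in both hunks above (names are illustrative; the real histogram comes from `histogram_metric_f64!` and the timing from `common_utils::date_time::time_it`):

    use std::time::Instant;
    use opentelemetry::{global, KeyValue};

    async fn timed_stream_read(stream_name: &str) {
        let meter = global::meter("drainer-example");
        let read_time = meter.f64_histogram("redis_stream_read_time").build();

        let start = Instant::now();
        // ... perform the Redis stream read here ...
        let elapsed_ms = start.elapsed().as_secs_f64() * 1000.0;

        // 0.27: `record(value, attributes)`, no Context argument.
        read_time.record(
            elapsed_ms,
            &[KeyValue::new("stream", stream_name.to_owned())],
        );
    }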

View File

@@ -63,8 +63,8 @@ pub async fn increment_stream_index(
 ) -> u8 {
     if index == total_streams - 1 {
         match jobs_picked.load(atomic::Ordering::SeqCst) {
-            0 => metrics::CYCLES_COMPLETED_UNSUCCESSFULLY.add(&metrics::CONTEXT, 1, &[]),
-            _ => metrics::CYCLES_COMPLETED_SUCCESSFULLY.add(&metrics::CONTEXT, 1, &[]),
+            0 => metrics::CYCLES_COMPLETED_UNSUCCESSFULLY.add(1, &[]),
+            _ => metrics::CYCLES_COMPLETED_SUCCESSFULLY.add(1, &[]),
         }
         jobs_picked.store(0, atomic::Ordering::SeqCst);
         0