Mirror of https://github.com/juspay/hyperswitch.git (synced 2025-10-28 04:04:55 +08:00)
feat(metrics): add histogram and update opentelemetry dependencies (#32)
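The change repeated across these hunks is the opentelemetry 0.18 metrics API: instruments now take an explicit Context as their first argument, and ValueRecorder is replaced by Histogram. Below is a minimal before/after sketch of one counter call site, assuming the counter is built on the ROUTER_API meter defined later in this diff; the instrument name is illustrative, and the sketch imports opentelemetry directly even though the crate reaches it through router_env.

use once_cell::sync::Lazy;
use opentelemetry::{global, metrics::Counter, Context};

// Shared context and counter, mirroring the statics added in this commit.
static CONTEXT: Lazy<Context> = Lazy::new(Context::current);
static PAYMENT_COUNT: Lazy<Counter<u64>> =
    Lazy::new(|| global::meter("ROUTER_API").u64_counter("PAYMENT_COUNT").init());

fn record_payment() {
    // Before (opentelemetry < 0.18): PAYMENT_COUNT.add(1, &[]);
    // After: the context is passed explicitly; attributes stay empty here.
    PAYMENT_COUNT.add(&CONTEXT, 1, &[]);
}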
@@ -75,7 +75,7 @@ impl Feature<api::Authorize, types::PaymentsRequestData>
         )
         .await;

-        metrics::PAYMENT_COUNT.add(1, &[]); // Metrics
+        metrics::PAYMENT_COUNT.add(&metrics::CONTEXT, 1, &[]); // Metrics

         (resp, payment_data)
     }
@@ -212,7 +212,7 @@ where
         payment_attempt: &storage::PaymentAttempt,
     ) -> CustomResult<(), errors::ApiErrorResponse> {
         if helpers::check_if_operation_confirm(self) {
-            metrics::TASKS_ADDED_COUNT.add(1, &[]); // Metrics
+            metrics::TASKS_ADDED_COUNT.add(&metrics::CONTEXT, 1, &[]); // Metrics

             let schedule_time = payment_sync::get_sync_process_schedule_time(
                 &payment_attempt.connector,
@@ -3,14 +3,14 @@ use router_env::{
     tracing::{self, instrument},
 };

-use crate::routes::metrics::HEALTH_METRIC;
+use crate::routes::metrics;

 /// .
 // #[logger::instrument(skip_all, name = "name1", level = "warn", fields( key1 = "val1" ))]
 #[instrument(skip_all)]
 // #[actix_web::get("/health")]
 pub async fn health() -> impl actix_web::Responder {
-    HEALTH_METRIC.add(1, &[]);
+    metrics::HEALTH_METRIC.add(&metrics::CONTEXT, 1, &[]);
     logger::info!("Health was called");
     actix_web::HttpResponse::Ok().body("health is good")
 }
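For context, a handler like this is mounted on the actix-web app elsewhere in the crate; the wiring below is a hypothetical sketch (route path, bind address, and server setup are assumptions), not the repository's actual server setup.

use actix_web::{web, App, HttpResponse, HttpServer, Responder};

// Stand-in for the handler above; the real one also increments HEALTH_METRIC.
async fn health() -> impl Responder {
    HttpResponse::Ok().body("health is good")
}

// Hypothetical route registration and bind address.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/health", web::get().to(health)))
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}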
@@ -4,8 +4,10 @@ use once_cell::sync::Lazy;
 use router_env::opentelemetry::{
     global,
     metrics::{Counter, Meter},
+    Context,
 };

+pub static CONTEXT: Lazy<Context> = Lazy::new(Context::current);
 static GLOBAL_METER: Lazy<Meter> = Lazy::new(|| global::meter("ROUTER_API"));

 pub(crate) static HEALTH_METRIC: Lazy<Counter<u64>> =
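The statics above give call sites a shared Context and a named meter. The hunk cuts off before HEALTH_METRIC's initializer, so the builder call below is an assumption (and the instrument name is illustrative), but the usage matches the 0.18 API used throughout this commit.

use once_cell::sync::Lazy;
use opentelemetry::{
    global,
    metrics::{Counter, Meter},
    Context,
};

pub static CONTEXT: Lazy<Context> = Lazy::new(Context::current);
static GLOBAL_METER: Lazy<Meter> = Lazy::new(|| global::meter("ROUTER_API"));

// Assumed initializer; the diff truncates after the static's type annotation.
pub(crate) static HEALTH_METRIC: Lazy<Counter<u64>> =
    Lazy::new(|| GLOBAL_METER.u64_counter("HEALTH_API").init());

fn bump_health() {
    // Empty attribute slice, matching the call sites in this commit.
    HEALTH_METRIC.add(&CONTEXT, 1, &[]);
}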
@@ -117,7 +117,7 @@ pub async fn consumer_operations(

             pt_utils::add_histogram_metrics(&pickup_time, task, &stream_name);

-            metrics::TASK_CONSUMED.add(1, &[]);
+            metrics::TASK_CONSUMED.add(&metrics::CONTEXT, 1, &[]);
             let runner = pt_utils::runner_from_task(task)?;
             handler.push(tokio::task::spawn(start_workflow(
                 state.clone(),
@@ -205,7 +205,7 @@ pub async fn run_executor<'a>(
             }
         },
     };
-    metrics::TASK_PROCESSED.add(1, &[]);
+    metrics::TASK_PROCESSED.add(&metrics::CONTEXT, 1, &[]);
 }

 #[instrument(skip_all)]
@@ -1,15 +1,15 @@
 use once_cell::sync::Lazy;
 use router_env::opentelemetry::{
     global,
-    metrics::{Counter, Meter, ValueRecorder},
+    metrics::{Counter, Histogram, Meter},
     Context,
 };

 pub static CONTEXT: Lazy<Context> = Lazy::new(Context::current);
 static PT_METER: Lazy<Meter> = Lazy::new(|| global::meter("PROCESS_TRACKER"));

-// Using ValueRecorder till https://bitbucket.org/juspay/orca/pull-requests/319
-// Histogram available in opentelemetry:0.18
-pub(crate) static CONSUMER_STATS: Lazy<ValueRecorder<f64>> =
-    Lazy::new(|| PT_METER.f64_value_recorder("CONSUMER_OPS").init());
+pub(crate) static CONSUMER_STATS: Lazy<Histogram<f64>> =
+    Lazy::new(|| PT_METER.f64_histogram("CONSUMER_OPS").init());

 macro_rules! create_counter {
     ($name:ident, $meter:ident) => {
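With ValueRecorder removed in opentelemetry 0.18, the same measurement goes through Histogram::record, which likewise takes the explicit Context. A minimal sketch of recording one value against the PROCESS_TRACKER meter shown above; the attribute key and helper function are illustrative, while the real call in add_histogram_metrics tags the stream name and runner.

use once_cell::sync::Lazy;
use opentelemetry::{
    global,
    metrics::{Histogram, Meter},
    Context, KeyValue,
};

pub static CONTEXT: Lazy<Context> = Lazy::new(Context::current);
static PT_METER: Lazy<Meter> = Lazy::new(|| global::meter("PROCESS_TRACKER"));
pub(crate) static CONSUMER_STATS: Lazy<Histogram<f64>> =
    Lazy::new(|| PT_METER.f64_histogram("CONSUMER_OPS").init());

fn record_pickup_delay(stream_name: &str, delta_secs: f64) {
    // Illustrative attribute; the commit's call site builds its KeyValue from
    // the stream name and runner instead.
    CONSUMER_STATS.record(
        &CONTEXT,
        delta_secs,
        &[KeyValue::new("stream", stream_name.to_owned())],
    );
}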
@@ -124,6 +124,6 @@ pub async fn fetch_producer_tasks(
     }

     new_tasks.append(&mut pending_tasks);
-    metrics::TASKS_PICKED_COUNT.add(new_tasks.len() as u64, &[]);
+    metrics::TASKS_PICKED_COUNT.add(&metrics::CONTEXT, new_tasks.len() as u64, &[]);
     Ok(new_tasks)
 }
@@ -72,7 +72,7 @@ pub async fn divide_and_append_tasks(
     settings: &SchedulerSettings,
 ) -> CustomResult<(), errors::ProcessTrackerError> {
     let batches = divide(tasks, settings);
-    metrics::BATCHES_CREATED.add(batches.len() as u64, &[]); // Metrics
+    metrics::BATCHES_CREATED.add(&metrics::CONTEXT, batches.len() as u64, &[]); // Metrics
     for batch in batches {
         let result = update_status_and_append(state, flow, batch).await;
         match result {
@@ -209,7 +209,7 @@ pub async fn get_batches(
             logger::error!(%error, "Error finding batch in stream");
             error.change_context(errors::ProcessTrackerError::BatchNotFound)
         })?;
-    metrics::BATCHES_CONSUMED.add(1, &[]);
+    metrics::BATCHES_CONSUMED.add(&metrics::CONTEXT, 1, &[]);

     let (batches, entry_ids): (Vec<Vec<ProcessTrackerBatch>>, Vec<Vec<String>>) = response.into_iter().map(|(_key, entries)| {
         entries.into_iter().try_fold(
@@ -303,6 +303,7 @@ pub fn add_histogram_metrics(
         logger::error!(%pickup_schedule_delta, "<- Time delta for scheduled tasks");
         let runner_name = runner.clone();
         metrics::CONSUMER_STATS.record(
+            &metrics::CONTEXT,
             pickup_schedule_delta,
             &[opentelemetry::KeyValue::new(
                 stream_name.to_owned(),
@@ -75,7 +75,7 @@ impl ProcessTracker {
         db: &dyn db::Db,
         schedule_time: PrimitiveDateTime,
     ) -> Result<(), errors::ProcessTrackerError> {
-        metrics::TASK_RETRIED.add(1, &[]);
+        metrics::TASK_RETRIED.add(&metrics::CONTEXT, 1, &[]);
         db.update_process_tracker(
             self.clone(),
             ProcessTrackerUpdate::StatusRetryUpdate {
@@ -102,7 +102,7 @@ impl ProcessTracker {
         )
         .await
         .attach_printable("Failed while updating status of the process")?;
-        metrics::TASK_FINISHED.add(1, &[]);
+        metrics::TASK_FINISHED.add(&metrics::CONTEXT, 1, &[]);
         Ok(())
     }
 }