feat(analytics): FRM Analytics (#4880)

Co-authored-by: hyperswitch-bot[bot] <148525504+hyperswitch-bot[bot]@users.noreply.github.com>
Co-authored-by: Abhitator216 <abhishek.kanojia@juspay.in>
Co-authored-by: Abhishek Kanojia <89402434+Abhitator216@users.noreply.github.com>
Co-authored-by: ivor-juspay <138492857+ivor-juspay@users.noreply.github.com>
Co-authored-by: Sampras Lopes <sampras.lopes@juspay.in>
This commit is contained in:
Sandeep Kumar
2024-07-04 12:22:27 +05:30
committed by GitHub
parent 7a1651d26b
commit cc88c0707f
36 changed files with 1629 additions and 78 deletions

View File

@ -9,6 +9,7 @@ use time::PrimitiveDateTime;
use super::{
active_payments::metrics::ActivePaymentsMetricRow,
auth_events::metrics::AuthEventMetricRow,
frm::{filters::FrmFilterRow, metrics::FrmMetricRow},
health_check::HealthCheck,
payment_intents::{filters::PaymentIntentFilterRow, metrics::PaymentIntentMetricRow},
payments::{
@ -130,6 +131,7 @@ impl AnalyticsDataSource for ClickhouseClient {
match table {
AnalyticsCollection::Payment
| AnalyticsCollection::Refund
| AnalyticsCollection::FraudCheck
| AnalyticsCollection::PaymentIntent
| AnalyticsCollection::Dispute => {
TableEngine::CollapsingMergeTree { sign: "sign_flag" }
@ -162,6 +164,8 @@ impl super::payment_intents::filters::PaymentIntentFilterAnalytics for Clickhous
impl super::payment_intents::metrics::PaymentIntentMetricAnalytics for ClickhouseClient {}
impl super::refunds::metrics::RefundMetricAnalytics for ClickhouseClient {}
impl super::refunds::filters::RefundFilterAnalytics for ClickhouseClient {}
impl super::frm::metrics::FrmMetricAnalytics for ClickhouseClient {}
impl super::frm::filters::FrmFilterAnalytics for ClickhouseClient {}
impl super::sdk_events::filters::SdkEventFilterAnalytics for ClickhouseClient {}
impl super::sdk_events::metrics::SdkEventMetricAnalytics for ClickhouseClient {}
impl super::sdk_events::events::SdkEventsFilterAnalytics for ClickhouseClient {}
@ -290,6 +294,25 @@ impl TryInto<RefundFilterRow> for serde_json::Value {
}
}
impl TryInto<FrmMetricRow> for serde_json::Value {
    type Error = Report<ParsingError>;

    /// Deserializes one clickhouse JSON result row into an [`FrmMetricRow`],
    /// attaching a struct-parse context on failure.
    fn try_into(self) -> Result<FrmMetricRow, Self::Error> {
        serde_json::from_value::<FrmMetricRow>(self).change_context(
            ParsingError::StructParseFailure("Failed to parse FrmMetricRow in clickhouse results"),
        )
    }
}
impl TryInto<FrmFilterRow> for serde_json::Value {
    type Error = Report<ParsingError>;

    /// Deserializes one clickhouse JSON result row into an [`FrmFilterRow`],
    /// attaching a struct-parse context on failure.
    fn try_into(self) -> Result<FrmFilterRow, Self::Error> {
        serde_json::from_value::<FrmFilterRow>(self).change_context(
            ParsingError::StructParseFailure("Failed to parse FrmFilterRow in clickhouse results"),
        )
    }
}
impl TryInto<DisputeMetricRow> for serde_json::Value {
type Error = Report<ParsingError>;
@ -409,6 +432,7 @@ impl ToSql<ClickhouseClient> for AnalyticsCollection {
match self {
Self::Payment => Ok("payment_attempts".to_string()),
Self::Refund => Ok("refunds".to_string()),
Self::FraudCheck => Ok("fraud_check".to_string()),
Self::SdkEvents => Ok("sdk_events_audit".to_string()),
Self::SdkEventsAnalytics => Ok("sdk_events".to_string()),
Self::ApiEvents => Ok("api_events_audit".to_string()),

View File

@ -21,6 +21,11 @@ pub async fn get_domain_info(
download_dimensions: None,
dimensions: utils::get_refund_dimensions(),
},
AnalyticsDomain::Frm => GetInfoResponse {
metrics: utils::get_frm_metrics_info(),
download_dimensions: None,
dimensions: utils::get_frm_dimensions(),
},
AnalyticsDomain::SdkEvents => GetInfoResponse {
metrics: utils::get_sdk_event_metrics_info(),
download_dimensions: None,

View File

@ -0,0 +1,9 @@
//! FRM (fraud check) analytics: metric and filter queries over the
//! fraud check data, plus per-bucket result accumulation.

pub mod accumulator;
mod core;
pub mod filters;
pub mod metrics;
pub mod types;

// Re-export the accumulator API and the two entry points so callers use
// `frm::get_metrics` / `frm::get_filters` directly.
pub use accumulator::{FrmMetricAccumulator, FrmMetricsAccumulator};

pub use self::core::{get_filters, get_metrics};

View File

@ -0,0 +1,78 @@
use api_models::analytics::frm::FrmMetricsBucketValue;
use common_enums::enums as storage_enums;
use super::metrics::FrmMetricRow;
/// Running accumulators for every FRM metric of a single response bucket.
#[derive(Debug, Default)]
pub struct FrmMetricsAccumulator {
    // Total count of FRM-triggered attempts seen so far for this bucket.
    pub frm_triggered_attempts: TriggeredAttemptsAccumulator,
    // Fraud vs. total tallies used to derive the blocked-rate percentage.
    pub frm_blocked_rate: BlockedRateAccumulator,
}
/// Sums the `count` column across result rows; stays `None` until at least
/// one row carrying a count has been observed.
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct TriggeredAttemptsAccumulator {
    pub count: Option<i64>,
}
/// Tallies fraud-flagged and total row counts to compute a blocked-rate
/// percentage on `collect`.
#[derive(Debug, Default)]
pub struct BlockedRateAccumulator {
    // Count of rows whose FRM status was `Fraud`.
    pub fraud: i64,
    // Count of all rows, regardless of status.
    pub total: i64,
}
/// Folds [`FrmMetricRow`]s into running state and yields the final metric
/// value for one bucket.
pub trait FrmMetricAccumulator {
    /// Final value produced once all rows have been accumulated.
    type MetricOutput;
    /// Incorporates one query result row into the running state.
    fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow);
    /// Consumes the accumulator and returns the bucket's metric value.
    fn collect(self) -> Self::MetricOutput;
}
impl FrmMetricAccumulator for TriggeredAttemptsAccumulator {
    type MetricOutput = Option<u64>;

    /// Adds a bucket's row count to the running total. `None` means
    /// "no data seen yet", not zero, so it never masks a present value.
    #[inline]
    fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) {
        self.count = match (self.count, metrics.count) {
            (Some(acc), Some(new)) => Some(acc + new),
            // At most one side is Some here; keep whichever it is.
            (acc, new) => acc.or(new),
        }
    }

    /// Converts the signed running total into the unsigned output,
    /// discarding negative totals (which cannot be valid counts).
    #[inline]
    fn collect(self) -> Self::MetricOutput {
        self.count.and_then(|total| u64::try_from(total).ok())
    }
}
impl FrmMetricAccumulator for BlockedRateAccumulator {
    type MetricOutput = Option<f64>;

    /// Adds the row's count to the grand total, and additionally to the
    /// fraud tally when the row's FRM status is `Fraud`.
    fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) {
        let bucket_count = metrics.count.unwrap_or_default();
        let is_fraud = metrics.frm_status.as_ref().map_or(false, |status| {
            status.as_ref() == &storage_enums::FraudCheckStatus::Fraud
        });
        if is_fraud {
            self.fraud += bucket_count;
        }
        self.total += bucket_count;
    }

    /// Returns `fraud / total` as a percentage; `None` when no rows were
    /// seen or either tally does not fit in `u32` (matching the lossless
    /// `u32 -> f64` conversion used here).
    fn collect(self) -> Self::MetricOutput {
        if self.total <= 0 {
            return None;
        }
        let fraud = f64::from(u32::try_from(self.fraud).ok()?);
        let total = f64::from(u32::try_from(self.total).ok()?);
        Some(fraud * 100.0 / total)
    }
}
impl FrmMetricsAccumulator {
    /// Drains every per-metric accumulator into the API-facing bucket value.
    pub fn collect(self) -> FrmMetricsBucketValue {
        let frm_triggered_attempts = self.frm_triggered_attempts.collect();
        let frm_blocked_rate = self.frm_blocked_rate.collect();
        FrmMetricsBucketValue {
            frm_blocked_rate,
            frm_triggered_attempts,
        }
    }
}

View File

@ -0,0 +1,193 @@
#![allow(dead_code)]
use std::collections::HashMap;
use api_models::analytics::{
frm::{FrmDimensions, FrmMetrics, FrmMetricsBucketIdentifier, FrmMetricsBucketResponse},
AnalyticsMetadata, FrmFilterValue, FrmFiltersResponse, GetFrmFilterRequest,
GetFrmMetricRequest, MetricsResponse,
};
use error_stack::ResultExt;
use router_env::{
logger,
metrics::add_attributes,
tracing::{self, Instrument},
};
use super::{
filters::{get_frm_filter_for_dimension, FrmFilterRow},
FrmMetricsAccumulator,
};
use crate::{
errors::{AnalyticsError, AnalyticsResult},
frm::FrmMetricAccumulator,
metrics, AnalyticsProvider,
};
/// Fetches all requested FRM metrics concurrently and folds the per-metric
/// rows into per-bucket accumulators.
///
/// Each metric in `req.metrics` is queried in its own spawned task; results
/// are then merged by [`FrmMetricsBucketIdentifier`] so that one response
/// bucket carries every metric value for that dimension combination.
pub async fn get_metrics(
    pool: &AnalyticsProvider,
    merchant_id: &String,
    req: GetFrmMetricRequest,
) -> AnalyticsResult<MetricsResponse<FrmMetricsBucketResponse>> {
    let mut metrics_accumulator: HashMap<FrmMetricsBucketIdentifier, FrmMetricsAccumulator> =
        HashMap::new();
    let mut set = tokio::task::JoinSet::new();
    for metric_type in req.metrics.iter().cloned() {
        let req = req.clone();
        let pool = pool.clone();
        let task_span =
            tracing::debug_span!("analytics_frm_query", frm_metric = metric_type.as_ref());
        // Currently JoinSet works with only static lifetime references even if the task pool does not outlive the given reference
        // We can optimize away this clone once that is fixed
        let merchant_id_scoped = merchant_id.to_owned();
        set.spawn(
            async move {
                let data = pool
                    .get_frm_metrics(
                        &metric_type,
                        // `req` is task-local here, so the field can be borrowed
                        // directly — the extra `.clone()` was redundant.
                        &req.group_by_names,
                        &merchant_id_scoped,
                        &req.filters,
                        &req.time_series.map(|t| t.granularity),
                        &req.time_range,
                    )
                    .await
                    .change_context(AnalyticsError::UnknownError);
                (metric_type, data)
            }
            .instrument(task_span),
        );
    }
    while let Some((metric, data)) = set
        .join_next()
        .await
        .transpose()
        .change_context(AnalyticsError::UnknownError)?
    {
        let data = data?;
        let attributes = &add_attributes([
            ("metric_type", metric.to_string()),
            ("source", pool.to_string()),
        ]);
        if let Ok(val) = u64::try_from(data.len()) {
            metrics::BUCKETS_FETCHED.record(&metrics::CONTEXT, val, attributes);
            logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
        }
        for (id, value) in data {
            logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
            // All metrics that share a bucket id feed the same accumulator.
            let metrics_builder = metrics_accumulator.entry(id).or_default();
            match metric {
                FrmMetrics::FrmBlockedRate => {
                    metrics_builder.frm_blocked_rate.add_metrics_bucket(&value)
                }
                FrmMetrics::FrmTriggeredAttempts => metrics_builder
                    .frm_triggered_attempts
                    .add_metrics_bucket(&value),
            }
        }
        logger::debug!(
            "Analytics Accumulated Results: metric: {}, results: {:#?}",
            metric,
            metrics_accumulator
        );
    }
    let query_data: Vec<FrmMetricsBucketResponse> = metrics_accumulator
        .into_iter()
        .map(|(id, val)| FrmMetricsBucketResponse {
            values: val.collect(),
            dimensions: id,
        })
        .collect();
    Ok(MetricsResponse {
        query_data,
        meta_data: [AnalyticsMetadata {
            current_time_range: req.time_range,
        }],
    })
}
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetFrmFilterRequest,
merchant_id: &String,
) -> AnalyticsResult<FrmFiltersResponse> {
let mut res = FrmFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_frm_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_frm_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics filters")
},
_ => {}
};
ckh_result
}
AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: FrmFilterRow| match dim {
FrmDimensions::FrmStatus => fil.frm_status.map(|i| i.as_ref().to_string()),
FrmDimensions::FrmName => fil.frm_name,
FrmDimensions::FrmTransactionType => {
fil.frm_transaction_type.map(|i| i.as_ref().to_string())
}
})
.collect::<Vec<String>>();
res.query_data.push(FrmFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}

View File

@ -0,0 +1,59 @@
use api_models::analytics::{
frm::{FrmDimensions, FrmTransactionType},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums::FraudCheckStatus;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{
AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult,
LoadRow,
},
};
pub trait FrmFilterAnalytics: LoadRow<FrmFilterRow> {}
/// Runs a `SELECT DISTINCT <dimension>` over the fraud check table for the
/// given merchant and time range, returning the raw filter rows.
pub async fn get_frm_filter_for_dimension<T>(
    dimension: FrmDimensions,
    merchant: &String,
    time_range: &TimeRange,
    pool: &T,
) -> FiltersResult<Vec<FrmFilterRow>>
where
    T: AnalyticsDataSource + FrmFilterAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);

    // Only the requested dimension is selected; `set_distinct` below turns
    // this into the distinct value list for that column.
    query_builder.add_select_column(dimension).switch()?;
    time_range
        .set_filter_clause(&mut query_builder)
        .attach_printable("Error filtering time range")
        .switch()?;
    query_builder
        .add_filter_clause("merchant_id", merchant)
        .switch()?;
    query_builder.set_distinct();
    // `execute_query` returns a nested result: the outer layer is mapped to a
    // query-building error, the inner layer to an execution failure.
    query_builder
        .execute_query::<FrmFilterRow, _>(pool)
        .await
        .change_context(FiltersError::QueryBuildingError)?
        .change_context(FiltersError::QueryExecutionFailure)
}
/// One row of a distinct-dimension filter query against the fraud check
/// table. Every field is optional because each query selects only the single
/// dimension it was asked for.
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct FrmFilterRow {
    pub frm_status: Option<DBEnumWrapper<FraudCheckStatus>>,
    pub frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>>,
    pub frm_name: Option<String>,
}

View File

@ -0,0 +1,99 @@
use api_models::analytics::{
frm::{FrmDimensions, FrmFilters, FrmMetrics, FrmMetricsBucketIdentifier, FrmTransactionType},
Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;
mod frm_blocked_rate;
mod frm_triggered_attempts;
use frm_blocked_rate::FrmBlockedRate;
use frm_triggered_attempts::FrmTriggeredAttempts;
use crate::{
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};
/// One aggregated row of an FRM metric query. Dimension fields are `None`
/// when the corresponding column was not part of the grouping set.
#[derive(Debug, Eq, PartialEq, serde::Deserialize)]
pub struct FrmMetricRow {
    pub frm_name: Option<String>,
    pub frm_status: Option<DBEnumWrapper<storage_enums::FraudCheckStatus>>,
    pub frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>>,
    // Aggregated sum column, when the metric query selects one.
    pub total: Option<bigdecimal::BigDecimal>,
    // Row count for this group.
    pub count: Option<i64>,
    // Earliest `created_at` within the group (time-bucket start).
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub start_bucket: Option<PrimitiveDateTime>,
    // Latest `created_at` within the group (time-bucket end).
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub end_bucket: Option<PrimitiveDateTime>,
}
/// Marker trait for data sources able to deserialize [`FrmMetricRow`]s.
pub trait FrmMetricAnalytics: LoadRow<FrmMetricRow> {}
/// Common interface for loading one FRM metric from any analytics backend.
#[async_trait::async_trait]
pub trait FrmMetric<T>
where
    T: AnalyticsDataSource + FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Executes this metric's query against `pool`, returning one row per
    /// bucket identifier (dimension combination plus time bucket).
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &str,
        filters: &FrmFilters,
        granularity: &Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> FrmMetric<T> for FrmMetrics
where
    T: AnalyticsDataSource + FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Dispatches to the concrete metric implementation for this variant.
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &str,
        filters: &FrmFilters,
        granularity: &Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
        match self {
            Self::FrmTriggeredAttempts => {
                FrmTriggeredAttempts::default()
                    .load_metrics(dimensions, merchant_id, filters, granularity, time_range, pool)
                    .await
            }
            Self::FrmBlockedRate => {
                FrmBlockedRate::default()
                    .load_metrics(dimensions, merchant_id, filters, granularity, time_range, pool)
                    .await
            }
        }
    }
}

View File

@ -0,0 +1,117 @@
use api_models::analytics::{
frm::{FrmDimensions, FrmFilters, FrmMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::FrmMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
/// Metric query for the FRM blocked-rate: counts rows grouped by status so
/// the accumulator can later compute fraud / total.
#[derive(Default)]
pub(super) struct FrmBlockedRate {}
#[async_trait::async_trait]
impl<T> super::FrmMetric<T> for FrmBlockedRate
where
    T: AnalyticsDataSource + super::FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Loads the per-bucket counts needed to derive the blocked rate: rows
    /// are grouped by the caller's dimensions *plus* `frm_status`, so the
    /// accumulator can split fraud rows from the total.
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &str,
        filters: &FrmFilters,
        granularity: &Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
        let mut query_builder = QueryBuilder::new(AnalyticsCollection::FraudCheck);
        let mut dimensions = dimensions.to_vec();
        // The status dimension is required for the fraud/total split, but
        // pushing it unconditionally would emit duplicate select and group-by
        // columns whenever the caller already grouped by it.
        if !dimensions
            .iter()
            .any(|dim| matches!(dim, FrmDimensions::FrmStatus))
        {
            dimensions.push(FrmDimensions::FrmStatus);
        }
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // `count` plus min/max `created_at` give the row count and the actual
        // time span covered by each group.
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        query_builder
            .add_filter_clause("merchant_id", merchant_id)
            .switch()?;
        time_range.set_filter_clause(&mut query_builder).switch()?;
        for dim in dimensions.iter() {
            query_builder.add_group_by_clause(dim).switch()?;
        }
        if let Some(granularity) = granularity.as_ref() {
            granularity
                .set_group_by_clause(&mut query_builder)
                .switch()?;
        }
        query_builder
            .execute_query::<FrmMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    FrmMetricsBucketIdentifier::new(
                        i.frm_name.as_ref().map(|i| i.to_string()),
                        // Status is intentionally omitted from the bucket id so
                        // fraud and non-fraud rows of the same bucket collapse
                        // together for the rate computation.
                        None,
                        i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
                        TimeRange {
                            // Clip bucket boundaries to the granularity window,
                            // falling back to the requested range when absent.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}

View File

@ -0,0 +1,116 @@
use api_models::analytics::{
frm::{FrmDimensions, FrmFilters, FrmMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::FrmMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
/// Metric query counting FRM-triggered attempts (fraud check rows) per
/// bucket.
#[derive(Default)]
pub(super) struct FrmTriggeredAttempts {}
#[async_trait::async_trait]
impl<T> super::FrmMetric<T> for FrmTriggeredAttempts
where
    T: AnalyticsDataSource + super::FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Counts fraud-check rows per requested dimension combination (plus the
    /// granularity time bucket when one is given) over the fraud check table.
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &str,
        filters: &FrmFilters,
        granularity: &Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // `count` plus min/max `created_at` give the row count and the actual
        // time span covered by each group.
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        query_builder
            .add_filter_clause("merchant_id", merchant_id)
            .switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity.as_ref() {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<FrmMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    FrmMetricsBucketIdentifier::new(
                        i.frm_name.as_ref().map(|i| i.to_string()),
                        i.frm_status.as_ref().map(|i| i.0.to_string()),
                        i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
                        TimeRange {
                            // Clip bucket boundaries to the granularity window,
                            // falling back to the requested range when absent.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<Vec<_>, crate::query::PostProcessingError>>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}

View File

@ -0,0 +1,38 @@
use api_models::analytics::frm::{FrmDimensions, FrmFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for FrmFilters
where
    T: AnalyticsDataSource,
    AnalyticsCollection: ToSql<T>,
{
    /// Adds an `IN (...)` range clause for each non-empty filter list;
    /// empty lists contribute no clause and so leave the query unconstrained
    /// on that dimension.
    fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
        if !self.frm_status.is_empty() {
            builder
                .add_filter_in_range_clause(FrmDimensions::FrmStatus, &self.frm_status)
                .attach_printable("Error adding frm status filter")?;
        }
        if !self.frm_name.is_empty() {
            builder
                .add_filter_in_range_clause(FrmDimensions::FrmName, &self.frm_name)
                .attach_printable("Error adding frm name filter")?;
        }
        if !self.frm_transaction_type.is_empty() {
            builder
                .add_filter_in_range_clause(
                    FrmDimensions::FrmTransactionType,
                    &self.frm_transaction_type,
                )
                .attach_printable("Error adding frm transaction type filter")?;
        }
        Ok(())
    }
}

View File

@ -2,6 +2,7 @@ mod clickhouse;
pub mod core;
pub mod disputes;
pub mod errors;
pub mod frm;
pub mod metrics;
pub mod payment_intents;
pub mod payments;
@ -40,6 +41,7 @@ use api_models::analytics::{
},
auth_events::{AuthEventMetrics, AuthEventMetricsBucketIdentifier},
disputes::{DisputeDimensions, DisputeFilters, DisputeMetrics, DisputeMetricsBucketIdentifier},
frm::{FrmDimensions, FrmFilters, FrmMetrics, FrmMetricsBucketIdentifier},
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetrics,
PaymentIntentMetricsBucketIdentifier,
@ -65,6 +67,7 @@ use strum::Display;
use self::{
active_payments::metrics::{ActivePaymentsMetric, ActivePaymentsMetricRow},
auth_events::metrics::{AuthEventMetric, AuthEventMetricRow},
frm::metrics::{FrmMetric, FrmMetricRow},
payment_intents::metrics::{PaymentIntentMetric, PaymentIntentMetricRow},
payments::{
distribution::{PaymentDistribution, PaymentDistributionRow},
@ -524,6 +527,106 @@ impl AnalyticsProvider {
.await
}
/// Loads rows for one FRM metric, recording the fetch time. For the combined
/// providers both backends are queried concurrently, any divergence is
/// logged, and the provider-preferred backend's result is returned.
pub async fn get_frm_metrics(
    &self,
    metric: &FrmMetrics,
    dimensions: &[FrmDimensions],
    merchant_id: &str,
    filters: &FrmFilters,
    granularity: &Option<Granularity>,
    time_range: &TimeRange,
) -> types::MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
    // Metrics to get the fetch time for each FRM metric
    metrics::request::record_operation_time(
        async {
            match self {
                Self::Sqlx(pool) => {
                    metric
                        .load_metrics(
                            dimensions,
                            merchant_id,
                            filters,
                            granularity,
                            time_range,
                            pool,
                        )
                        .await
                }
                Self::Clickhouse(pool) => {
                    metric
                        .load_metrics(
                            dimensions,
                            merchant_id,
                            filters,
                            granularity,
                            time_range,
                            pool,
                        )
                        .await
                }
                Self::CombinedCkh(sqlx_pool, ckh_pool) => {
                    let (ckh_result, sqlx_result) = tokio::join!(
                        metric.load_metrics(
                            dimensions,
                            merchant_id,
                            filters,
                            granularity,
                            time_range,
                            ckh_pool,
                        ),
                        metric.load_metrics(
                            dimensions,
                            merchant_id,
                            filters,
                            granularity,
                            time_range,
                            sqlx_pool,
                        )
                    );
                    match (&sqlx_result, &ckh_result) {
                        (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
                            logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics metrics")
                        }
                        _ => {}
                    };
                    // Clickhouse wins for CombinedCkh.
                    ckh_result
                }
                Self::CombinedSqlx(sqlx_pool, ckh_pool) => {
                    let (ckh_result, sqlx_result) = tokio::join!(
                        metric.load_metrics(
                            dimensions,
                            merchant_id,
                            filters,
                            granularity,
                            time_range,
                            ckh_pool,
                        ),
                        metric.load_metrics(
                            dimensions,
                            merchant_id,
                            filters,
                            granularity,
                            time_range,
                            sqlx_pool,
                        )
                    );
                    match (&sqlx_result, &ckh_result) {
                        (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
                            logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics metrics")
                        }
                        _ => {}
                    };
                    // Postgres wins for CombinedSqlx.
                    sqlx_result
                }
            }
        },
        &metrics::METRIC_FETCH_TIME,
        metric,
        self,
    )
    .await
}
pub async fn get_dispute_metrics(
&self,
metric: &DisputeMetrics,
@ -870,12 +973,14 @@ pub enum AnalyticsFlow {
GetPaymentMetrics,
GetPaymentIntentMetrics,
GetRefundsMetrics,
GetFrmMetrics,
GetSdkMetrics,
GetAuthMetrics,
GetActivePaymentsMetrics,
GetPaymentFilters,
GetPaymentIntentFilters,
GetRefundFilters,
GetFrmFilters,
GetSdkEventFilters,
GetApiEvents,
GetSdkEvents,

View File

@ -6,6 +6,7 @@ use api_models::{
api_event::ApiEventDimensions,
auth_events::AuthEventFlows,
disputes::DisputeDimensions,
frm::{FrmDimensions, FrmTransactionType},
payment_intents::PaymentIntentDimensions,
payments::{PaymentDimensions, PaymentDistributions},
refunds::{RefundDimensions, RefundType},
@ -19,7 +20,7 @@ use api_models::{
refunds::RefundStatus,
};
use common_utils::errors::{CustomResult, ParsingError};
use diesel_models::enums as storage_enums;
use diesel_models::{enums as storage_enums, enums::FraudCheckStatus};
use error_stack::ResultExt;
use router_env::{logger, Flow};
@ -372,10 +373,12 @@ impl_to_sql_for_to_string!(
&PaymentDimensions,
&PaymentIntentDimensions,
&RefundDimensions,
&FrmDimensions,
PaymentDimensions,
PaymentIntentDimensions,
&PaymentDistributions,
RefundDimensions,
FrmDimensions,
PaymentMethod,
PaymentMethodType,
AuthenticationType,
@ -383,9 +386,11 @@ impl_to_sql_for_to_string!(
AttemptStatus,
IntentStatus,
RefundStatus,
FraudCheckStatus,
storage_enums::RefundStatus,
Currency,
RefundType,
FrmTransactionType,
Flow,
&String,
&bool,

View File

@ -1,7 +1,7 @@
use std::{fmt::Display, str::FromStr};
use api_models::{
analytics::refunds::RefundType,
analytics::{frm::FrmTransactionType, refunds::RefundType},
enums::{DisputeStage, DisputeStatus},
};
use common_utils::{
@ -9,7 +9,8 @@ use common_utils::{
DbConnectionParams,
};
use diesel_models::enums::{
AttemptStatus, AuthenticationType, Currency, IntentStatus, PaymentMethod, RefundStatus,
AttemptStatus, AuthenticationType, Currency, FraudCheckStatus, IntentStatus, PaymentMethod,
RefundStatus,
};
use error_stack::ResultExt;
use sqlx::{
@ -91,6 +92,8 @@ db_type!(IntentStatus);
db_type!(PaymentMethod, TEXT);
db_type!(RefundStatus);
db_type!(RefundType);
db_type!(FraudCheckStatus);
db_type!(FrmTransactionType);
db_type!(DisputeStage);
db_type!(DisputeStatus);
@ -150,6 +153,8 @@ impl super::refunds::metrics::RefundMetricAnalytics for SqlxClient {}
impl super::refunds::filters::RefundFilterAnalytics for SqlxClient {}
impl super::disputes::filters::DisputeFilterAnalytics for SqlxClient {}
impl super::disputes::metrics::DisputeMetricAnalytics for SqlxClient {}
impl super::frm::metrics::FrmMetricAnalytics for SqlxClient {}
impl super::frm::filters::FrmFilterAnalytics for SqlxClient {}
#[async_trait::async_trait]
impl AnalyticsDataSource for SqlxClient {
@ -230,6 +235,49 @@ impl<'a> FromRow<'a, PgRow> for super::refunds::metrics::RefundMetricRow {
}
}
impl<'a> FromRow<'a, PgRow> for super::frm::metrics::FrmMetricRow {
    /// Maps a postgres row onto [`super::frm::metrics::FrmMetricRow`].
    fn from_row(row: &'a PgRow) -> sqlx::Result<Self> {
        // Not every metric query selects every column; a missing column is
        // therefore mapped to the field's default instead of an error.
        fn default_if_missing<V: Default>(result: sqlx::Result<V>) -> sqlx::Result<V> {
            match result {
                Err(ColumnNotFound(_)) => Ok(V::default()),
                other => other,
            }
        }
        let frm_name: Option<String> = default_if_missing(row.try_get("frm_name"))?;
        let frm_status: Option<DBEnumWrapper<FraudCheckStatus>> =
            default_if_missing(row.try_get("frm_status"))?;
        let frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>> =
            default_if_missing(row.try_get("frm_transaction_type"))?;
        let total: Option<bigdecimal::BigDecimal> = default_if_missing(row.try_get("total"))?;
        let count: Option<i64> = default_if_missing(row.try_get("count"))?;
        // Removing millisecond precision to get accurate diffs against clickhouse
        let start_bucket: Option<PrimitiveDateTime> = row
            .try_get::<Option<PrimitiveDateTime>, _>("start_bucket")?
            .and_then(|dt| dt.replace_millisecond(0).ok());
        let end_bucket: Option<PrimitiveDateTime> = row
            .try_get::<Option<PrimitiveDateTime>, _>("end_bucket")?
            .and_then(|dt| dt.replace_millisecond(0).ok());
        Ok(Self {
            frm_name,
            frm_status,
            frm_transaction_type,
            total,
            count,
            start_bucket,
            end_bucket,
        })
    }
}
impl<'a> FromRow<'a, PgRow> for super::payments::metrics::PaymentMetricRow {
fn from_row(row: &'a PgRow) -> sqlx::Result<Self> {
let currency: Option<DBEnumWrapper<Currency>> =
@ -516,6 +564,30 @@ impl<'a> FromRow<'a, PgRow> for super::refunds::filters::RefundFilterRow {
}
}
impl<'a> FromRow<'a, PgRow> for super::frm::filters::FrmFilterRow {
    /// Maps a postgres row onto [`super::frm::filters::FrmFilterRow`].
    fn from_row(row: &'a PgRow) -> sqlx::Result<Self> {
        // Filter queries select a single dimension at a time; missing columns
        // are therefore expected and decoded as the field's default (`None`).
        fn default_if_missing<V: Default>(result: sqlx::Result<V>) -> sqlx::Result<V> {
            match result {
                Err(ColumnNotFound(_)) => Ok(V::default()),
                other => other,
            }
        }
        let frm_name: Option<String> = default_if_missing(row.try_get("frm_name"))?;
        let frm_status: Option<DBEnumWrapper<FraudCheckStatus>> =
            default_if_missing(row.try_get("frm_status"))?;
        let frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>> =
            default_if_missing(row.try_get("frm_transaction_type"))?;
        Ok(Self {
            frm_name,
            frm_status,
            frm_transaction_type,
        })
    }
}
impl<'a> FromRow<'a, PgRow> for super::disputes::filters::DisputeFilterRow {
fn from_row(row: &'a PgRow) -> sqlx::Result<Self> {
let dispute_stage: Option<String> = row.try_get("dispute_stage").or_else(|e| match e {
@ -604,6 +676,7 @@ impl ToSql<SqlxClient> for AnalyticsCollection {
.attach_printable("SdkEvents table is not implemented for Sqlx"))?,
Self::ApiEvents => Err(error_stack::report!(ParsingError::UnknownError)
.attach_printable("ApiEvents table is not implemented for Sqlx"))?,
Self::FraudCheck => Ok("fraud_check".to_string()),
Self::PaymentIntent => Ok("payment_intent".to_string()),
Self::ConnectorEvents => Err(error_stack::report!(ParsingError::UnknownError)
.attach_printable("ConnectorEvents table is not implemented for Sqlx"))?,

View File

@ -15,6 +15,7 @@ use crate::errors::AnalyticsError;
pub enum AnalyticsDomain {
Payments,
Refunds,
Frm,
PaymentIntents,
AuthEvents,
SdkEvents,
@ -26,6 +27,7 @@ pub enum AnalyticsDomain {
pub enum AnalyticsCollection {
Payment,
Refund,
FraudCheck,
SdkEvents,
SdkEventsAnalytics,
ApiEvents,

View File

@ -2,6 +2,7 @@ use api_models::analytics::{
api_event::{ApiEventDimensions, ApiEventMetrics},
auth_events::AuthEventMetrics,
disputes::{DisputeDimensions, DisputeMetrics},
frm::{FrmDimensions, FrmMetrics},
payment_intents::{PaymentIntentDimensions, PaymentIntentMetrics},
payments::{PaymentDimensions, PaymentMetrics},
refunds::{RefundDimensions, RefundMetrics},
@ -22,6 +23,10 @@ pub fn get_refund_dimensions() -> Vec<NameDescription> {
RefundDimensions::iter().map(Into::into).collect()
}
pub fn get_frm_dimensions() -> Vec<NameDescription> {
FrmDimensions::iter().map(Into::into).collect()
}
pub fn get_sdk_event_dimensions() -> Vec<NameDescription> {
SdkEventDimensions::iter().map(Into::into).collect()
}
@ -42,6 +47,10 @@ pub fn get_refund_metrics_info() -> Vec<NameDescription> {
RefundMetrics::iter().map(Into::into).collect()
}
pub fn get_frm_metrics_info() -> Vec<NameDescription> {
FrmMetrics::iter().map(Into::into).collect()
}
pub fn get_sdk_event_metrics_info() -> Vec<NameDescription> {
SdkEventMetrics::iter().map(Into::into).collect()
}