id
stringlengths
11
116
type
stringclasses
1 value
granularity
stringclasses
4 values
content
stringlengths
16
477k
metadata
dict
file_analytics_2347948248287392286
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/success_rate.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentSuccessRate; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentSuccessRate where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentDimensions::PaymentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut 
query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-7862547295667642311
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/avg_ticket_size.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::{PaymentMetric, PaymentMetricRow}; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct AvgTicketSize; #[async_trait::async_trait] impl<T> PaymentMetric<T> for AvgTicketSize where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut 
query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.status.as_ref().map(|i| i.0), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/avg_ticket_size.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_20981847917325451
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/debit_routing.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct DebitRouting; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for DebitRouting where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Sum { field: "debit_routing_savings", alias: Some("total"), }) .switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut 
query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("currency") .attach_printable("Error grouping by currency") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/debit_routing.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-3360137758034463648
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/payment_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentCount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentCount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() 
{ query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.status.as_ref().map(|i| i.0), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/payment_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_7776082958311165957
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/payment_success_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentSuccessCount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentSuccessCount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) 
.attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/payment_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-205257074100372741
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/payment_processed_amount.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentProcessedAmount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentProcessedAmount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; 
time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("currency") .attach_printable("Error grouping by currency") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/payment_processed_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_7258543732838036510
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/failure_reasons.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{ Aggregate, FilterTypes, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct FailureReasons; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for FailureReasons where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut inner_query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); inner_query_builder .add_select_column("sum(sign_flag)") .switch()?; inner_query_builder .add_custom_filter_clause( PaymentDimensions::ErrorReason, "NULL", FilterTypes::IsNotNull, ) .switch()?; time_range .set_filter_clause(&mut inner_query_builder) .attach_printable("Error filtering time range for inner query") .switch()?; let inner_query_string = inner_query_builder .build_query() .attach_printable("Error building inner query") .change_context(MetricsError::QueryBuildingError)?; let mut outer_query_builder: QueryBuilder<T> = 
QueryBuilder::new(AnalyticsCollection::PaymentSessionized); for dim in dimensions.iter() { outer_query_builder.add_select_column(dim).switch()?; } outer_query_builder .add_select_column("sum(sign_flag) AS count") .switch()?; outer_query_builder .add_select_column(format!("({inner_query_string}) AS total")) .switch()?; outer_query_builder .add_select_column("first_attempt") .switch()?; outer_query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; outer_query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters .set_filter_clause(&mut outer_query_builder) .switch()?; auth.set_filter_clause(&mut outer_query_builder).switch()?; time_range .set_filter_clause(&mut outer_query_builder) .attach_printable("Error filtering time range for outer query") .switch()?; outer_query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Failure, ) .switch()?; outer_query_builder .add_custom_filter_clause( PaymentDimensions::ErrorReason, "NULL", FilterTypes::IsNotNull, ) .switch()?; for dim in dimensions.iter() { outer_query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } outer_query_builder .add_group_by_clause("first_attempt") .attach_printable("Error grouping by first_attempt") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut outer_query_builder) .attach_printable("Error adding granularity") .switch()?; } outer_query_builder .add_order_by_clause("count", Order::Descending) .attach_printable("Error adding order by clause") .switch()?; let filtered_dimensions: Vec<&PaymentDimensions> = dimensions .iter() .filter(|&&dim| dim != PaymentDimensions::ErrorReason) .collect(); for dim in &filtered_dimensions { outer_query_builder .add_order_by_clause(*dim, Order::Ascending) .attach_printable("Error adding order by clause") .switch()?; } 
outer_query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/failure_reasons.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3328174639137912010
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/connector_success_rate.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{ Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct ConnectorSuccessRate; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for ConnectorSuccessRate where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentDimensions::PaymentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut 
query_builder).switch()?; query_builder .add_custom_filter_clause(PaymentDimensions::Connector, "NULL", FilterTypes::IsNotNull) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/connector_success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3412768119110650944
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/retries_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::{ analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }, enums::IntentStatus, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{ Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct RetriesCount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for RetriesCount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, _dimensions: &[PaymentDimensions], auth: &AuthInfo, _filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized); query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; query_builder .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt) .switch()?; 
query_builder .add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/retries_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6662490502617155266
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/payments_distribution.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentsDistribution; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentsDistribution where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentDimensions::PaymentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder.add_select_column("first_attempt").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut 
query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("first_attempt") .attach_printable("Error grouping by first_attempt") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/payments_distribution.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-4954295552647122490
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/success_rate.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentSuccessRate; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentSuccessRate where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentDimensions::PaymentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; 
time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_95580321786968284
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/avg_ticket_size.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::{PaymentMetric, PaymentMetricRow}; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct AvgTicketSize; #[async_trait::async_trait] impl<T> PaymentMetric<T> for AvgTicketSize where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; 
auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.status.as_ref().map(|i| i.0), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/avg_ticket_size.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2329822340092204331
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/debit_routing.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct DebitRouting; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for DebitRouting where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Sum { field: "debit_routing_savings", alias: Some("total"), }) .switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; 
filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("currency") .attach_printable("Error grouping by currency") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/debit_routing.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-749495020790223739
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/payment_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentCount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentCount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") 
.switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.status.as_ref().map(|i| i.0), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/payment_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-2557280305352373930
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/payment_success_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentSuccessCount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentSuccessCount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut 
query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/payment_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6371517476608548620
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/metrics/sessionized_metrics/payment_processed_amount.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentProcessedAmount; #[async_trait::async_trait] impl<T> super::PaymentMetric<T> for PaymentProcessedAmount where T: AnalyticsDataSource + super::PaymentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentDimensions::PaymentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder.add_select_column("first_attempt").switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: 
"created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("first_attempt") .attach_printable("Error grouping by first_attempt") .switch()?; query_builder .add_group_by_clause("currency") .attach_printable("Error grouping by currency") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Charged, ) .switch()?; query_builder .execute_query::<PaymentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/metrics/sessionized_metrics/payment_processed_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-5310405064947813666
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payments/distribution/payment_error_message.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier}, Granularity, PaymentDistributionBody, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::{PaymentDistribution, PaymentDistributionRow}; use crate::{ enums::AuthInfo, query::{ Aggregate, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentErrorMessage; #[async_trait::async_trait] impl<T> PaymentDistribution<T> for PaymentErrorMessage where T: AnalyticsDataSource + super::PaymentDistributionAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_distribution( &self, distribution: &PaymentDistributionBody, dimensions: &[PaymentDimensions], auth: &AuthInfo, filters: &PaymentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(&distribution.distribution_for) .switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; 
filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause(&distribution.distribution_for) .attach_printable("Error grouping by distribution_for") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentDimensions::PaymentStatus, storage_enums::AttemptStatus::Failure, ) .switch()?; for dim in dimensions.iter() { query_builder.add_outer_select_column(dim).switch()?; } query_builder .add_outer_select_column(&distribution.distribution_for) .switch()?; query_builder.add_outer_select_column("count").switch()?; query_builder .add_outer_select_column("start_bucket") .switch()?; query_builder .add_outer_select_column("end_bucket") .switch()?; let sql_dimensions = query_builder.transform_to_sql_values(dimensions).switch()?; query_builder .add_outer_select_column(Window::Sum { field: "count", partition_by: Some(sql_dimensions), order_by: None, alias: Some("total"), }) .switch()?; query_builder .add_top_n_clause( dimensions, distribution.distribution_cardinality.into(), "count", Order::Descending, ) .switch()?; query_builder .execute_query::<PaymentDistributionRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( PaymentMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.status.as_ref().map(|i| i.0), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.client_source.clone(), i.client_version.clone(), i.profile_id.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), i.routing_approach.as_ref().map(|i| i.0.clone()), i.signature_network.clone(), i.is_issuer_regulated, i.is_debit_routed, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payments/distribution/payment_error_message.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_7585798484664255248
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/routing_events/events.rs // Contains: 1 structs, 0 enums use api_models::analytics::{routing_events::RoutingEventsRequest, Granularity}; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, }; pub trait RoutingEventLogAnalytics: LoadRow<RoutingEventsResult> {} pub async fn get_routing_events<T>( merchant_id: &common_utils::id_type::MerchantId, query_param: RoutingEventsRequest, pool: &T, ) -> FiltersResult<Vec<RoutingEventsResult>> where T: AnalyticsDataSource + RoutingEventLogAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RoutingEvents); query_builder.add_select_column("*").switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; query_builder .add_filter_clause("payment_id", &query_param.payment_id) .switch()?; if let Some(refund_id) = query_param.refund_id { query_builder .add_filter_clause("refund_id", &refund_id) .switch()?; } if let Some(dispute_id) = query_param.dispute_id { query_builder .add_filter_clause("dispute_id", &dispute_id) .switch()?; } query_builder .execute_query::<RoutingEventsResult, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? 
.change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct RoutingEventsResult { pub merchant_id: common_utils::id_type::MerchantId, pub profile_id: common_utils::id_type::ProfileId, pub payment_id: String, pub routable_connectors: String, pub payment_connector: Option<String>, pub request_id: Option<String>, pub flow: String, pub url: Option<String>, pub request: String, pub response: Option<String>, pub error: Option<String>, pub status_code: Option<u16>, #[serde(with = "common_utils::custom_serde::iso8601")] pub created_at: PrimitiveDateTime, pub method: String, pub routing_engine: String, pub routing_approach: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/routing_events/events.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_878999493930494730
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/connector_events/events.rs // Contains: 1 structs, 0 enums use api_models::analytics::{connector_events::ConnectorEventsRequest, Granularity}; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, }; pub trait ConnectorEventLogAnalytics: LoadRow<ConnectorEventsResult> {} pub async fn get_connector_events<T>( merchant_id: &common_utils::id_type::MerchantId, query_param: ConnectorEventsRequest, pool: &T, ) -> FiltersResult<Vec<ConnectorEventsResult>> where T: AnalyticsDataSource + ConnectorEventLogAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ConnectorEvents); query_builder.add_select_column("*").switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; query_builder .add_filter_clause("payment_id", &query_param.payment_id) .switch()?; if let Some(refund_id) = query_param.refund_id { query_builder .add_filter_clause("refund_id", &refund_id) .switch()?; } if let Some(dispute_id) = query_param.dispute_id { query_builder .add_filter_clause("dispute_id", &dispute_id) .switch()?; } //TODO!: update the execute_query function to return reports instead of plain errors... query_builder .execute_query::<ConnectorEventsResult, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? 
.change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct ConnectorEventsResult { pub merchant_id: common_utils::id_type::MerchantId, pub payment_id: String, pub connector_name: Option<String>, pub request_id: Option<String>, pub flow: String, pub request: String, #[serde(rename = "masked_response")] pub response: Option<String>, pub error: Option<String>, pub status_code: u16, pub latency: Option<u128>, #[serde(with = "common_utils::custom_serde::iso8601")] pub created_at: PrimitiveDateTime, pub method: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/connector_events/events.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_6134392508969727747
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/api_event/events.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ api_event::{ApiLogsRequest, QueryType}, Granularity, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use router_env::Flow; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, }; pub trait ApiLogsFilterAnalytics: LoadRow<ApiLogsResult> {} pub async fn get_api_event<T>( merchant_id: &common_utils::id_type::MerchantId, query_param: ApiLogsRequest, pool: &T, ) -> FiltersResult<Vec<ApiLogsResult>> where T: AnalyticsDataSource + ApiLogsFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents); query_builder.add_select_column("*").switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; match query_param.query_param { QueryType::Payment { payment_id } => { query_builder .add_filter_clause("payment_id", &payment_id) .switch()?; query_builder .add_filter_in_range_clause( "api_flow", &[ Flow::PaymentsCancel, Flow::PaymentsCapture, Flow::PaymentsConfirm, Flow::PaymentsCreate, Flow::PaymentsStart, Flow::PaymentsUpdate, Flow::RefundsCreate, Flow::RefundsUpdate, Flow::DisputesEvidenceSubmit, Flow::AttachDisputeEvidence, Flow::RetrieveDisputeEvidence, Flow::IncomingWebhookReceive, ], ) .switch()?; } QueryType::Refund { payment_id, refund_id, } => { query_builder .add_filter_clause("payment_id", &payment_id) .switch()?; query_builder .add_filter_clause("refund_id", refund_id) .switch()?; query_builder .add_filter_in_range_clause("api_flow", &[Flow::RefundsCreate, Flow::RefundsUpdate]) .switch()?; } QueryType::Dispute { 
payment_id, dispute_id, } => { query_builder .add_filter_clause("payment_id", &payment_id) .switch()?; query_builder .add_filter_clause("dispute_id", dispute_id) .switch()?; query_builder .add_filter_in_range_clause( "api_flow", &[ Flow::DisputesEvidenceSubmit, Flow::AttachDisputeEvidence, Flow::RetrieveDisputeEvidence, ], ) .switch()?; } } //TODO!: update the execute_query function to return reports instead of plain errors... query_builder .execute_query::<ApiLogsResult, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct ApiLogsResult { pub merchant_id: common_utils::id_type::MerchantId, pub payment_id: Option<common_utils::id_type::PaymentId>, pub refund_id: Option<String>, pub payment_method_id: Option<String>, pub payment_method: Option<String>, pub payment_method_type: Option<String>, pub customer_id: Option<String>, pub user_id: Option<String>, pub connector: Option<String>, pub request_id: Option<String>, pub flow_type: String, pub api_flow: String, pub api_auth_type: Option<String>, pub request: String, pub response: Option<String>, pub error: Option<String>, pub authentication_data: Option<String>, pub status_code: u16, pub latency: Option<u128>, pub user_agent: Option<String>, pub hs_latency: Option<u128>, pub ip_addr: Option<String>, #[serde(with = "common_utils::custom_serde::iso8601")] pub created_at: PrimitiveDateTime, pub http_method: Option<String>, pub url_path: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/api_event/events.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_6934919017236060631
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/api_event/metrics.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ api_event::{ ApiEventDimensions, ApiEventFilters, ApiEventMetrics, ApiEventMetricsBucketIdentifier, }, Granularity, TimeRange, }; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult}, }; mod api_count; pub mod latency; mod status_code_count; use std::collections::HashSet; use api_count::ApiCount; use latency::MaxLatency; use status_code_count::StatusCodeCount; use self::latency::LatencyAvg; #[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)] pub struct ApiEventMetricRow { pub latency: Option<u64>, pub api_count: Option<u64>, pub status_code_count: Option<u64>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub start_bucket: Option<PrimitiveDateTime>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub end_bucket: Option<PrimitiveDateTime>, } pub trait ApiEventMetricAnalytics: LoadRow<ApiEventMetricRow> + LoadRow<LatencyAvg> {} #[async_trait::async_trait] pub trait ApiEventMetric<T> where T: AnalyticsDataSource + ApiEventMetricAnalytics, { async fn load_metrics( &self, dimensions: &[ApiEventDimensions], merchant_id: &common_utils::id_type::MerchantId, filters: &ApiEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>>; } #[async_trait::async_trait] impl<T> ApiEventMetric<T> for ApiEventMetrics where T: AnalyticsDataSource + ApiEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[ApiEventDimensions], merchant_id: &common_utils::id_type::MerchantId, filters: &ApiEventFilters, granularity: 
Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> { match self { Self::Latency => { MaxLatency .load_metrics( dimensions, merchant_id, filters, granularity, time_range, pool, ) .await } Self::ApiCount => { ApiCount .load_metrics( dimensions, merchant_id, filters, granularity, time_range, pool, ) .await } Self::StatusCodeCount => { StatusCodeCount .load_metrics( dimensions, merchant_id, filters, granularity, time_range, pool, ) .await } } } }
{ "crate": "analytics", "file": "crates/analytics/src/api_event/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-615183932543084675
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/api_event/filters.rs // Contains: 1 structs, 0 enums use api_models::analytics::{api_event::ApiEventDimensions, Granularity, TimeRange}; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, }; pub trait ApiEventFilterAnalytics: LoadRow<ApiEventFilter> {} pub async fn get_api_event_filter_for_dimension<T>( dimension: ApiEventDimensions, merchant_id: &common_utils::id_type::MerchantId, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<ApiEventFilter>> where T: AnalyticsDataSource + ApiEventFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; query_builder.set_distinct(); query_builder .execute_query::<ApiEventFilter, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct ApiEventFilter { pub status_code: Option<i32>, pub flow_type: Option<String>, pub api_flow: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/api_event/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4949181199789044310
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/api_event/metrics/latency.rs // Contains: 2 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::ApiEventMetricRow; use crate::{ query::{ Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct MaxLatency; #[async_trait::async_trait] impl<T> super::ApiEventMetric<T> for MaxLatency where T: AnalyticsDataSource + super::ApiEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, _dimensions: &[ApiEventDimensions], merchant_id: &common_utils::id_type::MerchantId, filters: &ApiEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents); query_builder .add_select_column(Aggregate::Sum { field: "latency", alias: Some("latency_sum"), }) .switch()?; query_builder .add_select_column(Aggregate::Count { field: Some("latency"), alias: Some("latency_count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } 
filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_custom_filter_clause("request", "10.63.134.6", FilterTypes::NotLike) .attach_printable("Error filtering out locker IP") .switch()?; query_builder .execute_query::<LatencyAvg, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( ApiEventMetricsBucketIdentifier::new(TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }), ApiEventMetricRow { latency: if i.latency_count != 0 { Some(i.latency_sum.unwrap_or(0) / i.latency_count) } else { None }, api_count: None, status_code_count: None, start_bucket: i.start_bucket, end_bucket: i.end_bucket, }, )) }) .collect::<error_stack::Result< HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } } #[derive(Debug, PartialEq, Eq, serde::Deserialize)] pub struct LatencyAvg { latency_sum: Option<u64>, latency_count: u64, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub start_bucket: Option<PrimitiveDateTime>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub end_bucket: Option<PrimitiveDateTime>, }
{ "crate": "analytics", "file": "crates/analytics/src/api_event/metrics/latency.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 2, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1163417321627557836
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/api_event/metrics/status_code_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::ApiEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct StatusCodeCount; #[async_trait::async_trait] impl<T> super::ApiEventMetric<T> for StatusCodeCount where T: AnalyticsDataSource + super::ApiEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, _dimensions: &[ApiEventDimensions], merchant_id: &common_utils::id_type::MerchantId, filters: &ApiEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents); query_builder .add_select_column(Aggregate::Count { field: Some("status_code"), alias: Some("status_code_count"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; if let 
Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<ApiEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( ApiEventMetricsBucketIdentifier::new(TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }), i, )) }) .collect::<error_stack::Result< HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/api_event/metrics/status_code_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2987645151452747281
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/api_event/metrics/api_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::ApiEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct ApiCount; #[async_trait::async_trait] impl<T> super::ApiEventMetric<T> for ApiCount where T: AnalyticsDataSource + super::ApiEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, _dimensions: &[ApiEventDimensions], merchant_id: &common_utils::id_type::MerchantId, filters: &ApiEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents); query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("api_count"), }) .switch()?; if !filters.flow_type.is_empty() { query_builder .add_filter_in_range_clause(ApiEventDimensions::FlowType, &filters.flow_type) .attach_printable("Error adding flow_type filter") .switch()?; } query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) 
.attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .execute_query::<ApiEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( ApiEventMetricsBucketIdentifier::new(TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }), i, )) }) .collect::<error_stack::Result< HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/api_event/metrics/api_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-8553841876636268778
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/metrics.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payment_intents::{ PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetrics, PaymentIntentMetricsBucketIdentifier, }, Granularity, TimeRange, }; use diesel_models::enums as storage_enums; use time::PrimitiveDateTime; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult}, }; mod payment_intent_count; mod payment_processed_amount; mod payments_success_rate; mod sessionized_metrics; mod smart_retried_amount; mod successful_smart_retries; mod total_smart_retries; use payment_intent_count::PaymentIntentCount; use payment_processed_amount::PaymentProcessedAmount; use payments_success_rate::PaymentsSuccessRate; use smart_retried_amount::SmartRetriedAmount; use successful_smart_retries::SuccessfulSmartRetries; use total_smart_retries::TotalSmartRetries; #[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)] pub struct PaymentIntentMetricRow { pub status: Option<DBEnumWrapper<storage_enums::IntentStatus>>, pub currency: Option<DBEnumWrapper<storage_enums::Currency>>, pub profile_id: Option<String>, pub connector: Option<String>, pub authentication_type: Option<DBEnumWrapper<storage_enums::AuthenticationType>>, pub payment_method: Option<String>, pub payment_method_type: Option<String>, pub card_network: Option<String>, pub merchant_id: Option<String>, pub card_last_4: Option<String>, pub card_issuer: Option<String>, pub error_reason: Option<String>, pub first_attempt: Option<i64>, pub total: Option<bigdecimal::BigDecimal>, pub count: Option<i64>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub start_bucket: Option<PrimitiveDateTime>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub end_bucket: 
Option<PrimitiveDateTime>, } pub trait PaymentIntentMetricAnalytics: LoadRow<PaymentIntentMetricRow> {} #[async_trait::async_trait] pub trait PaymentIntentMetric<T> where T: AnalyticsDataSource + PaymentIntentMetricAnalytics, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>; } #[async_trait::async_trait] impl<T> PaymentIntentMetric<T> for PaymentIntentMetrics where T: AnalyticsDataSource + PaymentIntentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>> { match self { Self::SuccessfulSmartRetries => { SuccessfulSmartRetries .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::TotalSmartRetries => { TotalSmartRetries .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SmartRetriedAmount => { SmartRetriedAmount .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::PaymentIntentCount => { PaymentIntentCount .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::PaymentsSuccessRate => { PaymentsSuccessRate .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::PaymentProcessedAmount => { PaymentProcessedAmount .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedSuccessfulSmartRetries => { sessionized_metrics::SuccessfulSmartRetries 
.load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedTotalSmartRetries => { sessionized_metrics::TotalSmartRetries .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedSmartRetriedAmount => { sessionized_metrics::SmartRetriedAmount .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedPaymentIntentCount => { sessionized_metrics::PaymentIntentCount .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedPaymentsSuccessRate => { sessionized_metrics::PaymentsSuccessRate .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedPaymentProcessedAmount => { sessionized_metrics::PaymentProcessedAmount .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedPaymentsDistribution => { sessionized_metrics::PaymentsDistribution .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } } } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_8446989300413496235
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/sankey.rs // Contains: 1 structs, 2 enums use common_enums::enums; use common_utils::{ errors::ParsingError, types::{authentication::AuthInfo, TimeRange}, }; use error_stack::ResultExt; use router_env::logger; use crate::{ clickhouse::ClickhouseClient, query::{Aggregate, QueryBuilder, QueryFilter}, types::{AnalyticsCollection, DBEnumWrapper, MetricsError, MetricsResult}, }; #[derive( Clone, Copy, Debug, Default, Eq, Hash, PartialEq, serde::Deserialize, serde::Serialize, strum::Display, strum::EnumIter, strum::EnumString, )] #[serde(rename_all = "snake_case")] pub enum SessionizerRefundStatus { FullRefunded, #[default] NotRefunded, PartialRefunded, } #[derive( Clone, Copy, Debug, Default, Eq, Hash, PartialEq, serde::Deserialize, serde::Serialize, strum::Display, strum::EnumIter, strum::EnumString, )] #[serde(rename_all = "snake_case")] pub enum SessionizerDisputeStatus { DisputePresent, #[default] NotDisputed, } #[derive(Debug, serde::Deserialize, serde::Serialize)] pub struct SankeyRow { pub count: i64, pub status: DBEnumWrapper<enums::IntentStatus>, #[serde(default)] pub refunds_status: Option<DBEnumWrapper<SessionizerRefundStatus>>, #[serde(default)] pub dispute_status: Option<DBEnumWrapper<SessionizerDisputeStatus>>, pub first_attempt: i64, } impl TryInto<SankeyRow> for serde_json::Value { type Error = error_stack::Report<ParsingError>; fn try_into(self) -> Result<SankeyRow, Self::Error> { logger::debug!("Parsing SankeyRow from {:?}", self); serde_json::from_value(self).change_context(ParsingError::StructParseFailure( "Failed to parse Sankey in clickhouse results", )) } } pub async fn get_sankey_data( clickhouse_client: &ClickhouseClient, auth: &AuthInfo, time_range: &TimeRange, ) -> MetricsResult<Vec<SankeyRow>> { let mut query_builder = QueryBuilder::<ClickhouseClient>::new(AnalyticsCollection::PaymentIntentSessionized); query_builder 
.add_select_column(Aggregate::<String>::Count { field: None, alias: Some("count"), }) .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("status") .attach_printable("Error adding select clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("refunds_status") .attach_printable("Error adding select clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("dispute_status") .attach_printable("Error adding select clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("(attempt_count = 1) as first_attempt") .attach_printable("Error adding select clause") .change_context(MetricsError::QueryBuildingError)?; auth.set_filter_clause(&mut query_builder) .change_context(MetricsError::QueryBuildingError)?; time_range .set_filter_clause(&mut query_builder) .change_context(MetricsError::QueryBuildingError)?; query_builder .add_group_by_clause("status") .attach_printable("Error adding group by clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_group_by_clause("refunds_status") .attach_printable("Error adding group by clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_group_by_clause("dispute_status") .attach_printable("Error adding group by clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_group_by_clause("first_attempt") .attach_printable("Error adding group by clause") .change_context(MetricsError::QueryBuildingError)?; query_builder .execute_query::<SankeyRow, _>(clickhouse_client) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(Ok) .collect() }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/sankey.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 2, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1829070947046792397
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/filters.rs // Contains: 1 structs, 0 enums use api_models::analytics::{payment_intents::PaymentIntentDimensions, Granularity, TimeRange}; use common_utils::errors::ReportSwitchExt; use diesel_models::enums::{AuthenticationType, Currency, IntentStatus}; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{ AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult, LoadRow, }, }; pub trait PaymentIntentFilterAnalytics: LoadRow<PaymentIntentFilterRow> {} pub async fn get_payment_intent_filter_for_dimension<T>( dimension: PaymentIntentDimensions, merchant_id: &common_utils::id_type::MerchantId, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<PaymentIntentFilterRow>> where T: AnalyticsDataSource + PaymentIntentFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntent); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; query_builder.set_distinct(); query_builder .execute_query::<PaymentIntentFilterRow, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? 
.change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct PaymentIntentFilterRow { pub status: Option<DBEnumWrapper<IntentStatus>>, pub currency: Option<DBEnumWrapper<Currency>>, pub profile_id: Option<String>, pub connector: Option<String>, pub authentication_type: Option<DBEnumWrapper<AuthenticationType>>, pub payment_method: Option<String>, pub payment_method_type: Option<String>, pub card_network: Option<String>, pub merchant_id: Option<String>, pub card_last_4: Option<String>, pub card_issuer: Option<String>, pub error_reason: Option<String>, pub customer_id: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2743507663115618249
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/accumulator.rs // Contains: 8 structs, 0 enums use api_models::analytics::payment_intents::PaymentIntentMetricsBucketValue; use bigdecimal::ToPrimitive; use diesel_models::enums as storage_enums; use super::metrics::PaymentIntentMetricRow; #[derive(Debug, Default)] pub struct PaymentIntentMetricsAccumulator { pub successful_smart_retries: CountAccumulator, pub total_smart_retries: CountAccumulator, pub smart_retried_amount: SmartRetriedAmountAccumulator, pub payment_intent_count: CountAccumulator, pub payments_success_rate: PaymentsSuccessRateAccumulator, pub payment_processed_amount: ProcessedAmountAccumulator, pub payments_distribution: PaymentsDistributionAccumulator, } #[derive(Debug, Default)] pub struct ErrorDistributionRow { pub count: i64, pub total: i64, pub error_message: String, } #[derive(Debug, Default)] pub struct ErrorDistributionAccumulator { pub error_vec: Vec<ErrorDistributionRow>, } #[derive(Debug, Default)] #[repr(transparent)] pub struct CountAccumulator { pub count: Option<i64>, } pub trait PaymentIntentMetricAccumulator { type MetricOutput; fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow); fn collect(self) -> Self::MetricOutput; } #[derive(Debug, Default)] pub struct SmartRetriedAmountAccumulator { pub amount: Option<i64>, pub amount_without_retries: Option<i64>, } #[derive(Debug, Default)] pub struct PaymentsSuccessRateAccumulator { pub success: u32, pub success_without_retries: u32, pub total: u32, } #[derive(Debug, Default)] pub struct ProcessedAmountAccumulator { pub count_with_retries: Option<i64>, pub total_with_retries: Option<i64>, pub count_without_retries: Option<i64>, pub total_without_retries: Option<i64>, } #[derive(Debug, Default)] pub struct PaymentsDistributionAccumulator { pub success_without_retries: u32, pub failed_without_retries: u32, pub total: u32, } impl PaymentIntentMetricAccumulator for CountAccumulator { 
type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) { self.count = match (self.count, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.count.and_then(|i| u64::try_from(i).ok()) } } impl PaymentIntentMetricAccumulator for SmartRetriedAmountAccumulator { type MetricOutput = (Option<u64>, Option<u64>, Option<u64>, Option<u64>); #[inline] fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) { self.amount = match ( self.amount, metrics.total.as_ref().and_then(ToPrimitive::to_i64), ) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), }; if metrics.first_attempt.unwrap_or(0) == 1 { self.amount_without_retries = match ( self.amount_without_retries, metrics.total.as_ref().and_then(ToPrimitive::to_i64), ) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } else { self.amount_without_retries = Some(0); } } #[inline] fn collect(self) -> Self::MetricOutput { let with_retries = self.amount.and_then(|i| u64::try_from(i).ok()).or(Some(0)); let without_retries = self .amount_without_retries .and_then(|i| u64::try_from(i).ok()) .or(Some(0)); (with_retries, without_retries, Some(0), Some(0)) } } impl PaymentIntentMetricAccumulator for PaymentsSuccessRateAccumulator { type MetricOutput = ( Option<u32>, Option<u32>, Option<u32>, Option<f64>, Option<f64>, ); fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) { if let Some(ref status) = metrics.status { if status.as_ref() == &storage_enums::IntentStatus::Succeeded { if let Some(success) = metrics .count .and_then(|success| u32::try_from(success).ok()) { self.success += success; if metrics.first_attempt.unwrap_or(0) == 1 { self.success_without_retries += success; } } } if status.as_ref() != 
&storage_enums::IntentStatus::RequiresCustomerAction && status.as_ref() != &storage_enums::IntentStatus::RequiresPaymentMethod && status.as_ref() != &storage_enums::IntentStatus::RequiresMerchantAction && status.as_ref() != &storage_enums::IntentStatus::RequiresConfirmation { if let Some(total) = metrics.count.and_then(|total| u32::try_from(total).ok()) { self.total += total; } } } } fn collect(self) -> Self::MetricOutput { if self.total == 0 { (None, None, None, None, None) } else { let success = Some(self.success); let success_without_retries = Some(self.success_without_retries); let total = Some(self.total); let success_rate = match (success, total) { (Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)), _ => None, }; let success_without_retries_rate = match (success_without_retries, total) { (Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)), _ => None, }; ( success, success_without_retries, total, success_rate, success_without_retries_rate, ) } } } impl PaymentIntentMetricAccumulator for ProcessedAmountAccumulator { type MetricOutput = ( Option<u64>, Option<u64>, Option<u64>, Option<u64>, Option<u64>, Option<u64>, ); #[inline] fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) { self.total_with_retries = match ( self.total_with_retries, metrics.total.as_ref().and_then(ToPrimitive::to_i64), ) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), }; self.count_with_retries = match (self.count_with_retries, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), }; if metrics.first_attempt.unwrap_or(0) == 1 { self.total_without_retries = match ( self.total_without_retries, metrics.total.as_ref().and_then(ToPrimitive::to_i64), ) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), }; self.count_without_retries = match 
(self.count_without_retries, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), }; } } #[inline] fn collect(self) -> Self::MetricOutput { let total_with_retries = u64::try_from(self.total_with_retries.unwrap_or(0)).ok(); let count_with_retries = self.count_with_retries.and_then(|i| u64::try_from(i).ok()); let total_without_retries = u64::try_from(self.total_without_retries.unwrap_or(0)).ok(); let count_without_retries = self .count_without_retries .and_then(|i| u64::try_from(i).ok()); ( total_with_retries, count_with_retries, total_without_retries, count_without_retries, Some(0), Some(0), ) } } impl PaymentIntentMetricAccumulator for PaymentsDistributionAccumulator { type MetricOutput = (Option<f64>, Option<f64>); fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) { let first_attempt = metrics.first_attempt.unwrap_or(0); if let Some(ref status) = metrics.status { if status.as_ref() == &storage_enums::IntentStatus::Succeeded { if let Some(success) = metrics .count .and_then(|success| u32::try_from(success).ok()) { if first_attempt == 1 { self.success_without_retries += success; } } } if let Some(failed) = metrics.count.and_then(|failed| u32::try_from(failed).ok()) { if first_attempt == 0 || (first_attempt == 1 && status.as_ref() == &storage_enums::IntentStatus::Failed) { self.failed_without_retries += failed; } } if status.as_ref() != &storage_enums::IntentStatus::RequiresCustomerAction && status.as_ref() != &storage_enums::IntentStatus::RequiresPaymentMethod && status.as_ref() != &storage_enums::IntentStatus::RequiresMerchantAction && status.as_ref() != &storage_enums::IntentStatus::RequiresConfirmation { if let Some(total) = metrics.count.and_then(|total| u32::try_from(total).ok()) { self.total += total; } } } } fn collect(self) -> Self::MetricOutput { if self.total == 0 { (None, None) } else { let success_without_retries = Some(self.success_without_retries); let 
failed_without_retries = Some(self.failed_without_retries); let total = Some(self.total); let success_rate_without_retries = match (success_without_retries, total) { (Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)), _ => None, }; let failed_rate_without_retries = match (failed_without_retries, total) { (Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)), _ => None, }; (success_rate_without_retries, failed_rate_without_retries) } } } impl PaymentIntentMetricsAccumulator { pub fn collect(self) -> PaymentIntentMetricsBucketValue { let ( successful_payments, successful_payments_without_smart_retries, total_payments, payments_success_rate, payments_success_rate_without_smart_retries, ) = self.payments_success_rate.collect(); let ( smart_retried_amount, smart_retried_amount_without_smart_retries, smart_retried_amount_in_usd, smart_retried_amount_without_smart_retries_in_usd, ) = self.smart_retried_amount.collect(); let ( payment_processed_amount, payment_processed_count, payment_processed_amount_without_smart_retries, payment_processed_count_without_smart_retries, payment_processed_amount_in_usd, payment_processed_amount_without_smart_retries_in_usd, ) = self.payment_processed_amount.collect(); let ( payments_success_rate_distribution_without_smart_retries, payments_failure_rate_distribution_without_smart_retries, ) = self.payments_distribution.collect(); PaymentIntentMetricsBucketValue { successful_smart_retries: self.successful_smart_retries.collect(), total_smart_retries: self.total_smart_retries.collect(), smart_retried_amount, smart_retried_amount_in_usd, smart_retried_amount_without_smart_retries, smart_retried_amount_without_smart_retries_in_usd, payment_intent_count: self.payment_intent_count.collect(), successful_payments, successful_payments_without_smart_retries, total_payments, payments_success_rate, payments_success_rate_without_smart_retries, payment_processed_amount, payment_processed_count, 
payment_processed_amount_without_smart_retries, payment_processed_count_without_smart_retries, payments_success_rate_distribution_without_smart_retries, payments_failure_rate_distribution_without_smart_retries, payment_processed_amount_in_usd, payment_processed_amount_without_smart_retries_in_usd, } } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/accumulator.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 8, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-1035542661009029839
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/metrics/payments_success_rate.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payment_intents::{ PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentIntentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentsSuccessRate; #[async_trait::async_trait] impl<T> super::PaymentIntentMetric<T> for PaymentsSuccessRate where T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntent); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentIntentDimensions::PaymentIntentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut 
query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentIntentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentIntentMetricsBucketIdentifier::new( None, i.currency.as_ref().map(|i| i.0), i.profile_id.clone(), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/payments_success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4118027031236496561
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/total_smart_retries.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::analytics::{
    payment_intents::{
        PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{
        Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
        Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Total smart-retries metric: counts payment intents with more than one
/// attempt (`attempt_count > 1`), i.e. intents that were retried.
#[derive(Default)]
pub(super) struct TotalSmartRetries;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for TotalSmartRetries
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Runs a grouped COUNT over retried intents (attempt_count > 1),
    /// bucketed by the requested dimensions and optional granularity.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntent);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Observed per-group time bounds, clipped to granularity buckets below.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Smart-retry predicate: only intents that needed more than one attempt.
        query_builder
            .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
            .switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        i.status.as_ref().map(|i| i.0),
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/total_smart_retries.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-8777554829590600193
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/payment_intent_count.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::analytics::{
    payment_intents::{
        PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Payment-intent count metric: a plain grouped COUNT over the
/// payment-intent collection with no extra predicates.
#[derive(Default)]
pub(super) struct PaymentIntentCount;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentIntentCount
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Counts payment intents per (dimensions x granularity-bucket) group.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntent);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Observed per-group time bounds, clipped to granularity buckets below.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        i.status.as_ref().map(|i| i.0),
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/payment_intent_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-7176286510056186042
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/successful_smart_retries.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::{
    analytics::{
        payment_intents::{
            PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
        },
        Granularity, TimeRange,
    },
    enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{
        Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
        Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Successful smart-retries metric: counts retried intents
/// (`attempt_count > 1`) that ultimately reached `IntentStatus::Succeeded`.
#[derive(Default)]
pub(super) struct SuccessfulSmartRetries;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for SuccessfulSmartRetries
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Runs a grouped COUNT over retried-and-succeeded intents, bucketed by
    /// the requested dimensions and optional granularity.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntent);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Observed per-group time bounds, clipped to granularity buckets below.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Predicate pair defining a *successful* smart retry:
        // more than one attempt AND final status Succeeded.
        query_builder
            .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
            .switch()?;
        query_builder
            .add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
            .switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        i.status.as_ref().map(|i| i.0),
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/successful_smart_retries.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_6470519024002840546
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/smart_retried_amount.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::{
    analytics::{
        payment_intents::{
            PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
        },
        Granularity, TimeRange,
    },
    enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{
        Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
        Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Smart-retried amount metric: SUMs the `amount` of retried intents
/// (`attempt_count > 1`) that succeeded, grouped additionally by currency so
/// amounts in different currencies are never mixed.
#[derive(Default)]
pub(super) struct SmartRetriedAmount;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for SmartRetriedAmount
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Sums retried-and-succeeded intent amounts per
    /// (dimensions x currency x granularity-bucket) group.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntent);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // SUM(amount) rather than COUNT: this metric reports monetary volume.
        query_builder
            .add_select_column(Aggregate::Sum {
                field: "amount",
                alias: Some("total"),
            })
            .switch()?;
        // Currency must be selected (and grouped below) so sums stay per-currency.
        query_builder.add_select_column("currency").switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Successful smart-retry predicates: >1 attempt AND status Succeeded.
        query_builder
            .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
            .switch()?;
        query_builder
            .add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
            .switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        query_builder
            .add_group_by_clause("currency")
            .attach_printable("Error grouping by currency")
            .switch()?;
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        i.status.as_ref().map(|i| i.0),
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/smart_retried_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6796517442487105677
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/payment_processed_amount.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::analytics::{
    payment_intents::{
        PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Payment processed-amount metric: COUNT and SUM(amount) of *succeeded*
/// payment intents, grouped by the requested dimensions plus currency.
#[derive(Default)]
pub(super) struct PaymentProcessedAmount;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentProcessedAmount
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Aggregates processed (succeeded) intent counts and amounts per
    /// (dimensions x currency x granularity-bucket) group.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntent);
        // Status is appended as a dimension; it is then constrained to
        // Succeeded via the equality filter added below.
        let mut dimensions = dimensions.to_vec();
        dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Currency selected (and grouped) so amounts stay per-currency.
        query_builder.add_select_column("currency").switch()?;
        query_builder
            .add_select_column(Aggregate::Sum {
                field: "amount",
                alias: Some("total"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        query_builder
            .add_group_by_clause("currency")
            .attach_printable("Error grouping by currency")
            .switch()?;
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        // Only succeeded intents count toward processed amount. NOTE(review):
        // this filter is added after the GROUP BY clauses, unlike the other
        // metrics — presumably the builder emits clauses in the correct SQL
        // order regardless; confirm against QueryBuilder.
        query_builder
            .add_filter_clause(
                PaymentIntentDimensions::PaymentIntentStatus,
                storage_enums::IntentStatus::Succeeded,
            )
            .switch()?;
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        // Status omitted from the bucket id: all rows are
                        // Succeeded by construction.
                        None,
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/payment_processed_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-7409103785906771717
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_success_rate.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::analytics::{
    payment_intents::{
        PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Sessionized payments success-rate metric: like the non-sessionized
/// variant but runs against the sessionized collection and additionally
/// splits groups by whether the intent succeeded on its first attempt.
#[derive(Default)]
pub(crate) struct PaymentsSuccessRate;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentsSuccessRate
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Counts sessionized payment intents per
    /// (dimensions x status x first_attempt x granularity-bucket) group.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
        // Status appended so succeeded/other intents fall into separate groups.
        let mut dimensions = dimensions.to_vec();
        dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Raw SQL expression flagging first-attempt success vs. retried.
        query_builder
            .add_select_column("(attempt_count = 1) as first_attempt".to_string())
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        query_builder
            .add_group_by_clause("first_attempt")
            .attach_printable("Error grouping by first_attempt")
            .switch()?;
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        // Status excluded from the bucket id so per-status
                        // rows share one logical bucket.
                        None,
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-8175934283828328620
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/total_smart_retries.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::analytics::{
    payment_intents::{
        PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{
        Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
        Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Sessionized total smart-retries metric: counts intents with more than
/// one attempt, against the sessionized payment-intent collection.
#[derive(Default)]
pub(crate) struct TotalSmartRetries;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for TotalSmartRetries
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Counts retried intents (attempt_count > 1) per
    /// (dimensions x granularity-bucket) group.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Observed per-group time bounds, clipped to granularity buckets below.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Smart-retry predicate: only intents that needed more than one attempt.
        query_builder
            .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
            .switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        i.status.as_ref().map(|i| i.0),
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/total_smart_retries.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4293773113087740220
clm
file
// Repository: hyperswitch // Crate: analytics
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_distribution.rs
// Contains: 1 structs, 0 enums
use std::collections::HashSet;

use api_models::analytics::{
    payment_intents::{
        PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::PaymentIntentMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Sessionized payments-distribution metric: per-dimension counts split by
/// intent status and by whether the intent completed on its first attempt.
#[derive(Default)]
pub(crate) struct PaymentsDistribution;

#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentsDistribution
where
    T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Counts sessionized payment intents per
    /// (dimensions x status x first_attempt x granularity-bucket) group.
    ///
    /// # Errors
    /// Returns `MetricsError` on query building, execution, or
    /// post-processing failure.
    async fn load_metrics(
        &self,
        dimensions: &[PaymentIntentDimensions],
        auth: &AuthInfo,
        filters: &PaymentIntentFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
        // Status appended so the distribution is broken down per status.
        let mut dimensions = dimensions.to_vec();
        dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Raw SQL expression flagging first-attempt completion vs. retried.
        query_builder
            .add_select_column("(attempt_count = 1) as first_attempt")
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        query_builder
            .add_group_by_clause("first_attempt")
            .attach_printable("Error grouping by first_attempt")
            .switch()?;
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<PaymentIntentMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    PaymentIntentMetricsBucketIdentifier::new(
                        // Status excluded from the bucket id so per-status
                        // rows share one logical bucket.
                        None,
                        i.currency.as_ref().map(|i| i.0),
                        i.profile_id.clone(),
                        i.connector.clone(),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.payment_method.clone(),
                        i.payment_method_type.clone(),
                        i.card_network.clone(),
                        i.merchant_id.clone(),
                        i.card_last_4.clone(),
                        i.card_issuer.clone(),
                        i.error_reason.clone(),
                        TimeRange {
                            // Clip observed bounds to the granularity bucket;
                            // otherwise report the requested range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_distribution.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_5266096570594504484
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_intent_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payment_intents::{ PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentIntentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentIntentCount; #[async_trait::async_trait] impl<T> super::PaymentIntentMetric<T> for PaymentIntentCount where T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; 
time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentIntentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentIntentMetricsBucketIdentifier::new( i.status.as_ref().map(|i| i.0), i.currency.as_ref().map(|i| i.0), i.profile_id.clone(), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_intent_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2142963052575438190
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/successful_smart_retries.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::{ analytics::{ payment_intents::{ PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier, }, Granularity, TimeRange, }, enums::IntentStatus, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentIntentMetricRow; use crate::{ enums::AuthInfo, query::{ Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct SuccessfulSmartRetries; #[async_trait::async_trait] impl<T> super::PaymentIntentMetric<T> for SuccessfulSmartRetries where T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; 
auth.set_filter_clause(&mut query_builder).switch()?; query_builder .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt) .switch()?; query_builder .add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentIntentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( PaymentIntentMetricsBucketIdentifier::new( i.status.as_ref().map(|i| i.0), i.currency.as_ref().map(|i| i.0), i.profile_id.clone(), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/successful_smart_retries.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3742885046701867208
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/smart_retried_amount.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::{ analytics::{ payment_intents::{ PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier, }, Granularity, TimeRange, }, enums::IntentStatus, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentIntentMetricRow; use crate::{ enums::AuthInfo, query::{ Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window, }, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct SmartRetriedAmount; #[async_trait::async_trait] impl<T> super::PaymentIntentMetric<T> for SmartRetriedAmount where T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column("(attempt_count = 1) as first_attempt") .switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder 
.add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; query_builder .add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt) .switch()?; query_builder .add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("first_attempt") .attach_printable("Error grouping by first_attempt") .switch()?; query_builder .add_group_by_clause("currency") .attach_printable("Error grouping by currency") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<PaymentIntentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( PaymentIntentMetricsBucketIdentifier::new( i.status.as_ref().map(|i| i.0), i.currency.as_ref().map(|i| i.0), i.profile_id.clone(), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/smart_retried_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-2861600141889609443
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_processed_amount.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ payment_intents::{ PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::PaymentIntentMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct PaymentProcessedAmount; #[async_trait::async_trait] impl<T> super::PaymentIntentMetric<T> for PaymentProcessedAmount where T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[PaymentIntentDimensions], auth: &AuthInfo, filters: &PaymentIntentFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(PaymentIntentDimensions::PaymentIntentStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column("(attempt_count = 1) as first_attempt") .switch()?; query_builder.add_select_column("currency").switch()?; query_builder 
.add_select_column(Aggregate::Sum { field: "amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause("first_attempt") .attach_printable("Error grouping by first_attempt") .switch()?; query_builder .add_group_by_clause("currency") .attach_printable("Error grouping by currency") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .add_filter_clause( PaymentIntentDimensions::PaymentIntentStatus, storage_enums::IntentStatus::Succeeded, ) .switch()?; query_builder .execute_query::<PaymentIntentMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( PaymentIntentMetricsBucketIdentifier::new( None, i.currency.as_ref().map(|i| i.0), i.profile_id.clone(), i.connector.clone(), i.authentication_type.as_ref().map(|i| i.0), i.payment_method.clone(), i.payment_method_type.clone(), i.card_network.clone(), i.merchant_id.clone(), i.card_last_4.clone(), i.card_issuer.clone(), i.error_reason.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_processed_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1750898484548374452
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/active_payments/metrics.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ active_payments::{ActivePaymentsMetrics, ActivePaymentsMetricsBucketIdentifier}, Granularity, TimeRange, }; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult}, }; mod active_payments; use active_payments::ActivePayments; #[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)] pub struct ActivePaymentsMetricRow { pub count: Option<i64>, } pub trait ActivePaymentsMetricAnalytics: LoadRow<ActivePaymentsMetricRow> {} #[async_trait::async_trait] pub trait ActivePaymentsMetric<T> where T: AnalyticsDataSource + ActivePaymentsMetricAnalytics, { async fn load_metrics( &self, merchant_id: &common_utils::id_type::MerchantId, publishable_key: &str, time_range: &TimeRange, pool: &T, ) -> MetricsResult< HashSet<( ActivePaymentsMetricsBucketIdentifier, ActivePaymentsMetricRow, )>, >; } #[async_trait::async_trait] impl<T> ActivePaymentsMetric<T> for ActivePaymentsMetrics where T: AnalyticsDataSource + ActivePaymentsMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, merchant_id: &common_utils::id_type::MerchantId, publishable_key: &str, time_range: &TimeRange, pool: &T, ) -> MetricsResult< HashSet<( ActivePaymentsMetricsBucketIdentifier, ActivePaymentsMetricRow, )>, > { match self { Self::ActivePayments => { ActivePayments .load_metrics(merchant_id, publishable_key, time_range, pool) .await } } } }
{ "crate": "analytics", "file": "crates/analytics/src/active_payments/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-4476681898587554312
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/active_payments/accumulator.rs // Contains: 2 structs, 0 enums use api_models::analytics::active_payments::ActivePaymentsMetricsBucketValue; use super::metrics::ActivePaymentsMetricRow; #[derive(Debug, Default)] pub struct ActivePaymentsMetricsAccumulator { pub active_payments: CountAccumulator, } #[derive(Debug, Default)] #[repr(transparent)] pub struct CountAccumulator { pub count: Option<i64>, } pub trait ActivePaymentsMetricAccumulator { type MetricOutput; fn add_metrics_bucket(&mut self, metrics: &ActivePaymentsMetricRow); fn collect(self) -> Self::MetricOutput; } impl ActivePaymentsMetricAccumulator for CountAccumulator { type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &ActivePaymentsMetricRow) { self.count = match (self.count, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.count.and_then(|i| u64::try_from(i).ok()) } } impl ActivePaymentsMetricsAccumulator { #[allow(dead_code)] pub fn collect(self) -> ActivePaymentsMetricsBucketValue { ActivePaymentsMetricsBucketValue { active_payments: self.active_payments.collect(), } } }
{ "crate": "analytics", "file": "crates/analytics/src/active_payments/accumulator.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 2, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-4403907775419956105
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/active_payments/metrics/active_payments.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ active_payments::ActivePaymentsMetricsBucketIdentifier, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::ActivePaymentsMetricRow; use crate::{ query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct ActivePayments; #[async_trait::async_trait] impl<T> super::ActivePaymentsMetric<T> for ActivePayments where T: AnalyticsDataSource + super::ActivePaymentsMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, merchant_id: &common_utils::id_type::MerchantId, publishable_key: &str, time_range: &TimeRange, pool: &T, ) -> MetricsResult< HashSet<( ActivePaymentsMetricsBucketIdentifier, ActivePaymentsMetricRow, )>, > { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ActivePaymentsAnalytics); query_builder .add_select_column(Aggregate::DistinctCount { field: "payment_id", alias: Some("count"), }) .switch()?; query_builder .add_custom_filter_clause( "merchant_id", format!("'{}','{}'", merchant_id.get_string_repr(), publishable_key), FilterTypes::In, ) .switch()?; query_builder .add_negative_filter_clause("payment_id", "") .switch()?; query_builder .add_custom_filter_clause( "flow_type", "'sdk', 'payment', 'payment_redirection_response'", FilterTypes::In, ) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .execute_query::<ActivePaymentsMetricRow, _>(pool) .await 
.change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| Ok((ActivePaymentsMetricsBucketIdentifier::new(None), i))) .collect::<error_stack::Result< HashSet<( ActivePaymentsMetricsBucketIdentifier, ActivePaymentsMetricRow, )>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/active_payments/metrics/active_payments.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1435066049262615040
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/events.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ sdk_events::{SdkEventNames, SdkEventsRequest}, Granularity, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use strum::IntoEnumIterator; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, }; pub trait SdkEventsFilterAnalytics: LoadRow<SdkEventsResult> {} pub async fn get_sdk_event<T>( publishable_key: &String, request: SdkEventsRequest, pool: &T, ) -> FiltersResult<Vec<SdkEventsResult>> where T: AnalyticsDataSource + SdkEventsFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let static_event_list = SdkEventNames::iter() .map(|i| format!("'{}'", i.as_ref())) .collect::<Vec<String>>() .join(","); let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents); query_builder.add_select_column("*").switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_filter_clause("payment_id", &request.payment_id) .switch()?; query_builder .add_custom_filter_clause("event_name", static_event_list, FilterTypes::In) .switch()?; let _ = &request .time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; //TODO!: update the execute_query function to return reports instead of plain errors... query_builder .execute_query::<SdkEventsResult, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? 
.change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct SdkEventsResult { pub merchant_id: common_utils::id_type::MerchantId, pub payment_id: common_utils::id_type::PaymentId, pub event_name: Option<String>, pub log_type: Option<String>, pub first_event: bool, pub browser_name: Option<String>, pub browser_version: Option<String>, pub source: Option<String>, pub category: Option<String>, pub version: Option<String>, pub value: Option<String>, pub platform: Option<String>, pub component: Option<String>, pub payment_method: Option<String>, pub payment_experience: Option<String>, pub latency: Option<u64>, #[serde(with = "common_utils::custom_serde::iso8601")] pub created_at_precise: PrimitiveDateTime, #[serde(with = "common_utils::custom_serde::iso8601")] pub created_at: PrimitiveDateTime, }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/events.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-5533933433416255981
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetrics, SdkEventMetricsBucketIdentifier, }, Granularity, TimeRange, }; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult}, }; mod average_payment_time; mod load_time; mod payment_attempts; mod payment_data_filled_count; mod payment_method_selected_count; mod payment_methods_call_count; mod sdk_initiated_count; mod sdk_rendered_count; use average_payment_time::AveragePaymentTime; use load_time::LoadTime; use payment_attempts::PaymentAttempts; use payment_data_filled_count::PaymentDataFilledCount; use payment_method_selected_count::PaymentMethodSelectedCount; use payment_methods_call_count::PaymentMethodsCallCount; use sdk_initiated_count::SdkInitiatedCount; use sdk_rendered_count::SdkRenderedCount; #[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)] pub struct SdkEventMetricRow { pub total: Option<bigdecimal::BigDecimal>, pub count: Option<i64>, pub time_bucket: Option<String>, pub payment_method: Option<String>, pub platform: Option<String>, pub browser_name: Option<String>, pub source: Option<String>, pub component: Option<String>, pub payment_experience: Option<String>, } pub trait SdkEventMetricAnalytics: LoadRow<SdkEventMetricRow> {} #[async_trait::async_trait] pub trait SdkEventMetric<T> where T: AnalyticsDataSource + SdkEventMetricAnalytics, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>>; } #[async_trait::async_trait] impl<T> SdkEventMetric<T> for SdkEventMetrics where T: 
AnalyticsDataSource + SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { match self { Self::PaymentAttempts => { PaymentAttempts .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::PaymentMethodsCallCount => { PaymentMethodsCallCount .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::SdkRenderedCount => { SdkRenderedCount .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::SdkInitiatedCount => { SdkInitiatedCount .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::PaymentMethodSelectedCount => { PaymentMethodSelectedCount .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::PaymentDataFilledCount => { PaymentDataFilledCount .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::AveragePaymentTime => { AveragePaymentTime .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } Self::LoadTime => { LoadTime .load_metrics( dimensions, publishable_key, filters, granularity, time_range, pool, ) .await } } } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2655701973702535895
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/filters.rs // Contains: 1 structs, 0 enums use api_models::analytics::{sdk_events::SdkEventDimensions, Granularity, TimeRange}; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow}, }; pub trait SdkEventFilterAnalytics: LoadRow<SdkEventFilter> {} pub async fn get_sdk_event_filter_for_dimension<T>( dimension: SdkEventDimensions, publishable_key: &String, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<SdkEventFilter>> where T: AnalyticsDataSource + SdkEventFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder.set_distinct(); query_builder .execute_query::<SdkEventFilter, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct SdkEventFilter { pub payment_method: Option<String>, pub platform: Option<String>, pub browser_name: Option<String>, pub source: Option<String>, pub component: Option<String>, pub payment_experience: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-3282617875635193182
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/accumulator.rs // Contains: 3 structs, 0 enums use api_models::analytics::sdk_events::SdkEventMetricsBucketValue; use router_env::logger; use super::metrics::SdkEventMetricRow; #[derive(Debug, Default)] pub struct SdkEventMetricsAccumulator { pub payment_attempts: CountAccumulator, pub payment_methods_call_count: CountAccumulator, pub average_payment_time: CountAccumulator, pub load_time: CountAccumulator, pub sdk_initiated_count: CountAccumulator, pub sdk_rendered_count: CountAccumulator, pub payment_method_selected_count: CountAccumulator, pub payment_data_filled_count: CountAccumulator, } #[derive(Debug, Default)] #[repr(transparent)] pub struct CountAccumulator { pub count: Option<i64>, } #[derive(Debug, Default)] pub struct AverageAccumulator { pub total: u32, pub count: u32, } pub trait SdkEventMetricAccumulator { type MetricOutput; fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow); fn collect(self) -> Self::MetricOutput; } impl SdkEventMetricAccumulator for CountAccumulator { type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) { self.count = match (self.count, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.count.and_then(|i| u64::try_from(i).ok()) } } impl SdkEventMetricAccumulator for AverageAccumulator { type MetricOutput = Option<f64>; fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) { let total = metrics .total .as_ref() .and_then(bigdecimal::ToPrimitive::to_u32); let count = metrics.count.and_then(|total| u32::try_from(total).ok()); match (total, count) { (Some(total), Some(count)) => { self.total += total; self.count += count; } _ => { logger::error!(message="Dropping metrics for average accumulator", metric=?metrics); } } } fn collect(self) -> 
Self::MetricOutput { if self.count == 0 { None } else { Some(f64::from(self.total) / f64::from(self.count)) } } } impl SdkEventMetricsAccumulator { #[allow(dead_code)] pub fn collect(self) -> SdkEventMetricsBucketValue { SdkEventMetricsBucketValue { payment_attempts: self.payment_attempts.collect(), payment_methods_call_count: self.payment_methods_call_count.collect(), average_payment_time: self.average_payment_time.collect(), load_time: self.load_time.collect(), sdk_initiated_count: self.sdk_initiated_count.collect(), sdk_rendered_count: self.sdk_rendered_count.collect(), payment_method_selected_count: self.payment_method_selected_count.collect(), payment_data_filled_count: self.payment_data_filled_count.collect(), } } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/accumulator.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 3, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3027679722989992141
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentMethodSelectedCount; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for PaymentMethodSelectedCount where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", 
SdkEventNames::PaymentMethodChanged) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2410993363124681725
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentDataFilledCount; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for PaymentDataFilledCount where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", SdkEventNames::PaymentDataFilled) 
.switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-648657497886219987
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/load_time.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct LoadTime; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for LoadTime where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Percentile { field: "latency", alias: Some("count"), percentile: Some(&50), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", SdkEventNames::AppRendered) 
.switch()?; query_builder .add_custom_filter_clause("latency", 0, FilterTypes::Gt) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/load_time.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-2060238434471813048
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct SdkRenderedCount; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for SdkRenderedCount where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", SdkEventNames::AppRendered) .switch()?; time_range 
.set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_657957134502499920
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentMethodsCallCount; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for PaymentMethodsCallCount where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", 
SdkEventNames::PaymentMethodsCall) .switch()?; query_builder .add_filter_clause("log_type", "INFO") .switch()?; query_builder .add_filter_clause("category", "API") .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-4508081569465748572
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/average_payment_time.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct AveragePaymentTime; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for AveragePaymentTime where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Percentile { field: "latency", alias: Some("count"), percentile: Some(&50), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", 
SdkEventNames::PaymentAttempt) .switch()?; query_builder .add_custom_filter_clause("latency", 0, FilterTypes::Gt) .switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/average_payment_time.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1091584191078819435
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct SdkInitiatedCount; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for SdkInitiatedCount where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", SdkEventNames::OrcaElementsCalled) .switch()?; 
time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2838312163727501477
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/sdk_events/metrics/payment_attempts.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ sdk_events::{ SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames, }, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::SdkEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct PaymentAttempts; #[async_trait::async_trait] impl<T> super::SdkEventMetric<T> for PaymentAttempts where T: AnalyticsDataSource + super::SdkEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[SdkEventDimensions], publishable_key: &str, filters: &SdkEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics); let dimensions = dimensions.to_vec(); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; if let Some(granularity) = granularity { query_builder .add_granularity_in_mins(granularity) .switch()?; } filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", publishable_key) .switch()?; query_builder .add_bool_filter_clause("first_event", 1) .switch()?; query_builder .add_filter_clause("event_name", SdkEventNames::PaymentAttempt) .switch()?; time_range 
.set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(_granularity) = granularity.as_ref() { query_builder .add_group_by_clause("time_bucket") .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<SdkEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( SdkEventMetricsBucketIdentifier::new( i.payment_method.clone(), i.platform.clone(), i.browser_name.clone(), i.source.clone(), i.component.clone(), i.payment_experience.clone(), i.time_bucket.clone(), ), i, )) }) .collect::<error_stack::Result< HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/sdk_events/metrics/payment_attempts.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6729736530401334862
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics.rs // Contains: 1 structs, 0 enums mod dispute_status_metric; mod sessionized_metrics; mod total_amount_disputed; mod total_dispute_lost_amount; use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetrics, DisputeMetricsBucketIdentifier}, Granularity, }; use common_utils::types::TimeRange; use diesel_models::enums as storage_enums; use time::PrimitiveDateTime; use self::{ dispute_status_metric::DisputeStatusMetric, total_amount_disputed::TotalAmountDisputed, total_dispute_lost_amount::TotalDisputeLostAmount, }; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult}, }; #[derive(Debug, Eq, PartialEq, serde::Deserialize, Hash)] pub struct DisputeMetricRow { pub dispute_stage: Option<DBEnumWrapper<storage_enums::DisputeStage>>, pub dispute_status: Option<DBEnumWrapper<storage_enums::DisputeStatus>>, pub connector: Option<String>, pub currency: Option<DBEnumWrapper<storage_enums::Currency>>, pub total: Option<bigdecimal::BigDecimal>, pub count: Option<i64>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub start_bucket: Option<PrimitiveDateTime>, #[serde(with = "common_utils::custom_serde::iso8601::option")] pub end_bucket: Option<PrimitiveDateTime>, } pub trait DisputeMetricAnalytics: LoadRow<DisputeMetricRow> {} #[async_trait::async_trait] pub trait DisputeMetric<T> where T: AnalyticsDataSource + DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> 
MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>; } #[async_trait::async_trait] impl<T> DisputeMetric<T> for DisputeMetrics where T: AnalyticsDataSource + DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> { match self { Self::TotalAmountDisputed => { TotalAmountDisputed::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::DisputeStatusMetric => { DisputeStatusMetric::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::TotalDisputeLostAmount => { TotalDisputeLostAmount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedTotalAmountDisputed => { sessionized_metrics::TotalAmountDisputed::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedDisputeStatusMetric => { sessionized_metrics::DisputeStatusMetric::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedTotalDisputeLostAmount => { sessionized_metrics::TotalDisputeLostAmount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } } } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6195876028268803832
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/filters.rs // Contains: 1 structs, 0 enums use api_models::analytics::{disputes::DisputeDimensions, Granularity, TimeRange}; use common_utils::errors::ReportSwitchExt; use diesel_models::enums::Currency; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{ AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult, LoadRow, }, }; pub trait DisputeFilterAnalytics: LoadRow<DisputeFilterRow> {} pub async fn get_dispute_filter_for_dimension<T>( dimension: DisputeDimensions, auth: &AuthInfo, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<DisputeFilterRow>> where T: AnalyticsDataSource + DisputeFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; query_builder.set_distinct(); query_builder .execute_query::<DisputeFilterRow, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct DisputeFilterRow { pub connector: Option<String>, pub dispute_status: Option<String>, pub connector_status: Option<String>, pub dispute_stage: Option<String>, pub currency: Option<DBEnumWrapper<Currency>>, }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_9176771423526464352
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/accumulators.rs // Contains: 3 structs, 0 enums use api_models::analytics::disputes::DisputeMetricsBucketValue; use diesel_models::enums as storage_enums; use super::metrics::DisputeMetricRow; #[derive(Debug, Default)] pub struct DisputeMetricsAccumulator { pub disputes_status_rate: RateAccumulator, pub disputed_amount: DisputedAmountAccumulator, pub dispute_lost_amount: DisputedAmountAccumulator, } #[derive(Debug, Default)] pub struct RateAccumulator { pub won_count: i64, pub challenged_count: i64, pub lost_count: i64, pub total: i64, } #[derive(Debug, Default)] #[repr(transparent)] pub struct DisputedAmountAccumulator { pub total: Option<i64>, } pub trait DisputeMetricAccumulator { type MetricOutput; fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow); fn collect(self) -> Self::MetricOutput; } impl DisputeMetricAccumulator for DisputedAmountAccumulator { type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow) { self.total = match ( self.total, metrics .total .as_ref() .and_then(bigdecimal::ToPrimitive::to_i64), ) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.total.and_then(|i| u64::try_from(i).ok()) } } impl DisputeMetricAccumulator for RateAccumulator { type MetricOutput = Option<(Option<u64>, Option<u64>, Option<u64>, Option<u64>)>; fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow) { if let Some(ref dispute_status) = metrics.dispute_status { if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeChallenged { self.challenged_count += metrics.count.unwrap_or_default(); } if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeWon { self.won_count += metrics.count.unwrap_or_default(); } if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeLost { 
self.lost_count += metrics.count.unwrap_or_default(); } }; self.total += metrics.count.unwrap_or_default(); } fn collect(self) -> Self::MetricOutput { if self.total <= 0 { Some((None, None, None, None)) } else { Some(( u64::try_from(self.challenged_count).ok(), u64::try_from(self.won_count).ok(), u64::try_from(self.lost_count).ok(), u64::try_from(self.total).ok(), )) } } } impl DisputeMetricsAccumulator { pub fn collect(self) -> DisputeMetricsBucketValue { let (challenge_rate, won_rate, lost_rate, total_dispute) = self.disputes_status_rate.collect().unwrap_or_default(); DisputeMetricsBucketValue { disputes_challenged: challenge_rate, disputes_won: won_rate, disputes_lost: lost_rate, disputed_amount: self.disputed_amount.collect(), dispute_lost_amount: self.dispute_lost_amount.collect(), total_dispute, } } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/accumulators.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 3, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-5014267541494366604
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics/total_dispute_lost_amount.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::DisputeMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct TotalDisputeLostAmount {} #[async_trait::async_trait] impl<T> super::DisputeMetric<T> for TotalDisputeLostAmount where T: AnalyticsDataSource + super::DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> where T: AnalyticsDataSource + super::DisputeMetricAnalytics, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "dispute_amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range 
.set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause("dispute_status", "dispute_lost") .switch()?; query_builder .execute_query::<DisputeMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( DisputeMetricsBucketIdentifier::new( i.dispute_stage.as_ref().map(|i| i.0), i.connector.clone(), i.currency.as_ref().map(|i| i.0), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics/total_dispute_lost_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1734926402632274057
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics/dispute_status_metric.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::DisputeMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct DisputeStatusMetric {} #[async_trait::async_trait] impl<T> super::DisputeMetric<T> for DisputeStatusMetric where T: AnalyticsDataSource + super::DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> where T: AnalyticsDataSource + super::DisputeMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::Dispute); for dim in dimensions { query_builder.add_select_column(dim).switch()?; } query_builder.add_select_column("dispute_status").switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; 
time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions { query_builder.add_group_by_clause(dim).switch()?; } query_builder .add_group_by_clause("dispute_status") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<DisputeMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( DisputeMetricsBucketIdentifier::new( i.dispute_stage.as_ref().map(|i| i.0), i.connector.clone(), i.currency.as_ref().map(|i| i.0), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics/dispute_status_metric.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-2655395975274542667
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics/total_amount_disputed.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::DisputeMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct TotalAmountDisputed {} #[async_trait::async_trait] impl<T> super::DisputeMetric<T> for TotalAmountDisputed where T: AnalyticsDataSource + super::DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> where T: AnalyticsDataSource + super::DisputeMetricAnalytics, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute); for dim in dimensions { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "dispute_amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) 
.attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause("dispute_status", "dispute_won") .switch()?; query_builder .execute_query::<DisputeMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( DisputeMetricsBucketIdentifier::new( i.dispute_stage.as_ref().map(|i| i.0), i.connector.clone(), i.currency.as_ref().map(|i| i.0), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics/total_amount_disputed.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-2547712530989514383
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics/sessionized_metrics/total_dispute_lost_amount.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::DisputeMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct TotalDisputeLostAmount {} #[async_trait::async_trait] impl<T> super::DisputeMetric<T> for TotalDisputeLostAmount where T: AnalyticsDataSource + super::DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> where T: AnalyticsDataSource + super::DisputeMetricAnalytics, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::DisputeSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "dispute_amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; 
time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause("dispute_status", "dispute_lost") .switch()?; query_builder .execute_query::<DisputeMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( DisputeMetricsBucketIdentifier::new( i.dispute_stage.as_ref().map(|i| i.0), i.connector.clone(), i.currency.as_ref().map(|i| i.0), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics/sessionized_metrics/total_dispute_lost_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-7079747393308443327
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics/sessionized_metrics/dispute_status_metric.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::DisputeMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct DisputeStatusMetric {} #[async_trait::async_trait] impl<T> super::DisputeMetric<T> for DisputeStatusMetric where T: AnalyticsDataSource + super::DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> where T: AnalyticsDataSource + super::DisputeMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::DisputeSessionized); for dim in dimensions { query_builder.add_select_column(dim).switch()?; } query_builder.add_select_column("dispute_status").switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut 
query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions { query_builder.add_group_by_clause(dim).switch()?; } query_builder .add_group_by_clause("dispute_status") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<DisputeMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( DisputeMetricsBucketIdentifier::new( i.dispute_stage.as_ref().map(|i| i.0), i.connector.clone(), i.currency.as_ref().map(|i| i.0), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics/sessionized_metrics/dispute_status_metric.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_1077153344562300376
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/disputes/metrics/sessionized_metrics/total_amount_disputed.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::DisputeMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct TotalAmountDisputed {} #[async_trait::async_trait] impl<T> super::DisputeMetric<T> for TotalAmountDisputed where T: AnalyticsDataSource + super::DisputeMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[DisputeDimensions], auth: &AuthInfo, filters: &DisputeFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> where T: AnalyticsDataSource + super::DisputeMetricAnalytics, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::DisputeSessionized); for dim in dimensions { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "dispute_amount", alias: Some("total"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range 
.set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause("dispute_status", "dispute_won") .switch()?; query_builder .execute_query::<DisputeMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( DisputeMetricsBucketIdentifier::new( i.dispute_stage.as_ref().map(|i| i.0), i.connector.clone(), i.currency.as_ref().map(|i| i.0), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/disputes/metrics/sessionized_metrics/total_amount_disputed.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-1576302864471241901
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/frm/metrics.rs // Contains: 1 structs, 0 enums

use api_models::analytics::{
    frm::{FrmDimensions, FrmFilters, FrmMetrics, FrmMetricsBucketIdentifier, FrmTransactionType},
    Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;

mod frm_blocked_rate;
mod frm_triggered_attempts;

use frm_blocked_rate::FrmBlockedRate;
use frm_triggered_attempts::FrmTriggeredAttempts;

use crate::{
    query::{Aggregate, GroupByClause, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};

/// One row of an FRM (Fraud Risk Management) metrics query result, as
/// deserialized from the analytics data source.
#[derive(Debug, Eq, PartialEq, serde::Deserialize)]
pub struct FrmMetricRow {
    // Dimension columns; each is `Some` only when the query grouped by it.
    pub frm_name: Option<String>,
    pub frm_status: Option<DBEnumWrapper<storage_enums::FraudCheckStatus>>,
    pub frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>>,
    // Aggregate columns produced by the metric queries.
    pub total: Option<bigdecimal::BigDecimal>,
    pub count: Option<i64>,
    // MIN/MAX `created_at` within the bucket; used downstream to clip the
    // bucket's time range to the requested granularity.
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub start_bucket: Option<PrimitiveDateTime>,
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub end_bucket: Option<PrimitiveDateTime>,
}

/// Marker trait: an analytics data source capable of loading [`FrmMetricRow`]s.
pub trait FrmMetricAnalytics: LoadRow<FrmMetricRow> {}

/// A single FRM metric that can be loaded from a data source `T`.
#[async_trait::async_trait]
pub trait FrmMetric<T>
where
    T: AnalyticsDataSource + FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Loads this metric for `merchant_id`, grouped by `dimensions` (and by
    /// `granularity` time buckets when provided), restricted to `filters` and
    /// `time_range`, returning one row per resulting bucket.
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &common_utils::id_type::MerchantId,
        filters: &FrmFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>>;
}

#[async_trait::async_trait]
impl<T> FrmMetric<T> for FrmMetrics
where
    T: AnalyticsDataSource + FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Dispatches to the concrete implementation for this metric variant.
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &common_utils::id_type::MerchantId,
        filters: &FrmFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
        match self {
            Self::FrmTriggeredAttempts => {
                FrmTriggeredAttempts::default()
                    .load_metrics(
                        dimensions,
                        merchant_id,
                        filters,
                        granularity,
                        time_range,
                        pool,
                    )
                    .await
            }
            Self::FrmBlockedRate => {
                FrmBlockedRate::default()
                    .load_metrics(
                        dimensions,
                        merchant_id,
                        filters,
                        granularity,
                        time_range,
                        pool,
                    )
                    .await
            }
        }
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/frm/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4514937072988842451
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/frm/filters.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ frm::{FrmDimensions, FrmTransactionType}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums::FraudCheckStatus; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{ AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult, LoadRow, }, }; pub trait FrmFilterAnalytics: LoadRow<FrmFilterRow> {} pub async fn get_frm_filter_for_dimension<T>( dimension: FrmDimensions, merchant_id: &common_utils::id_type::MerchantId, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<FrmFilterRow>> where T: AnalyticsDataSource + FrmFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; query_builder.set_distinct(); query_builder .execute_query::<FrmFilterRow, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct FrmFilterRow { pub frm_status: Option<DBEnumWrapper<FraudCheckStatus>>, pub frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>>, pub frm_name: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/frm/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2927711186126624942
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/frm/accumulator.rs // Contains: 3 structs, 0 enums use api_models::analytics::frm::FrmMetricsBucketValue; use common_enums::enums as storage_enums; use super::metrics::FrmMetricRow; #[derive(Debug, Default)] pub struct FrmMetricsAccumulator { pub frm_triggered_attempts: TriggeredAttemptsAccumulator, pub frm_blocked_rate: BlockedRateAccumulator, } #[derive(Debug, Default)] #[repr(transparent)] pub struct TriggeredAttemptsAccumulator { pub count: Option<i64>, } #[derive(Debug, Default)] pub struct BlockedRateAccumulator { pub fraud: i64, pub total: i64, } pub trait FrmMetricAccumulator { type MetricOutput; fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow); fn collect(self) -> Self::MetricOutput; } impl FrmMetricAccumulator for TriggeredAttemptsAccumulator { type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) { self.count = match (self.count, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.count.and_then(|i| u64::try_from(i).ok()) } } impl FrmMetricAccumulator for BlockedRateAccumulator { type MetricOutput = Option<f64>; fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) { if let Some(ref frm_status) = metrics.frm_status { if frm_status.as_ref() == &storage_enums::FraudCheckStatus::Fraud { self.fraud += metrics.count.unwrap_or_default(); } }; self.total += metrics.count.unwrap_or_default(); } fn collect(self) -> Self::MetricOutput { if self.total <= 0 { None } else { Some( f64::from(u32::try_from(self.fraud).ok()?) * 100.0 / f64::from(u32::try_from(self.total).ok()?), ) } } } impl FrmMetricsAccumulator { pub fn collect(self) -> FrmMetricsBucketValue { FrmMetricsBucketValue { frm_blocked_rate: self.frm_blocked_rate.collect(), frm_triggered_attempts: self.frm_triggered_attempts.collect(), } } }
{ "crate": "analytics", "file": "crates/analytics/src/frm/accumulator.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 3, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-1630491441212462412
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/frm/metrics/frm_triggered_attempts.rs // Contains: 1 structs, 0 enums

use api_models::analytics::{
    frm::{FrmDimensions, FrmFilters, FrmMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::FrmMetricRow;
use crate::{
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Metric: count of fraud-check rows, i.e. how many times FRM was triggered.
#[derive(Default)]
pub(super) struct FrmTriggeredAttempts {}

#[async_trait::async_trait]
impl<T> super::FrmMetric<T> for FrmTriggeredAttempts
where
    T: AnalyticsDataSource + super::FrmMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and runs the aggregation query, returning one
    /// (`FrmMetricsBucketIdentifier`, `FrmMetricRow`) pair per bucket.
    ///
    /// Selects the requested `dimensions`, `COUNT(*)`, and `MIN`/`MAX(created_at)`
    /// as bucket bounds; filters by `filters`, `merchant_id`, and `time_range`;
    /// groups by `dimensions` plus the optional `granularity` time bucket.
    async fn load_metrics(
        &self,
        dimensions: &[FrmDimensions],
        merchant_id: &common_utils::id_type::MerchantId,
        filters: &FrmFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);

        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;

        filters.set_filter_clause(&mut query_builder).switch()?;

        query_builder
            .add_filter_clause("merchant_id", merchant_id)
            .switch()?;

        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;

        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }

        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }

        query_builder
            .execute_query::<FrmMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    FrmMetricsBucketIdentifier::new(
                        i.frm_name.as_ref().map(|i| i.to_string()),
                        i.frm_status.as_ref().map(|i| i.0.to_string()),
                        i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
                        TimeRange {
                            // Clip bucket bounds onto the granularity grid when
                            // requested; otherwise use the overall query range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<Vec<_>, crate::query::PostProcessingError>>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/frm/metrics/frm_triggered_attempts.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_5532646158856928526
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/frm/metrics/frm_blocked_rate.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ frm::{FrmDimensions, FrmFilters, FrmMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::FrmMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct FrmBlockedRate {} #[async_trait::async_trait] impl<T> super::FrmMetric<T> for FrmBlockedRate where T: AnalyticsDataSource + super::FrmMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[FrmDimensions], merchant_id: &common_utils::id_type::MerchantId, filters: &FrmFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> where T: AnalyticsDataSource + super::FrmMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::FraudCheck); let mut dimensions = dimensions.to_vec(); dimensions.push(FrmDimensions::FrmStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; query_builder .add_filter_clause("merchant_id", merchant_id) .switch()?; time_range.set_filter_clause(&mut 
query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<FrmMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( FrmMetricsBucketIdentifier::new( i.frm_name.as_ref().map(|i| i.to_string()), None, i.frm_transaction_type.as_ref().map(|i| i.0.to_string()), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/frm/metrics/frm_blocked_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-257159451235382647
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics.rs // Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    auth_events::{
        AuthEventDimensions, AuthEventFilters, AuthEventMetrics, AuthEventMetricsBucketIdentifier,
    },
    Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;

use crate::{
    query::{Aggregate, GroupByClause, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
    AuthInfo,
};

mod authentication_attempt_count;
mod authentication_count;
mod authentication_error_message;
mod authentication_exemption_approved_count;
mod authentication_exemption_requested_count;
mod authentication_funnel;
mod authentication_success_count;
mod challenge_attempt_count;
mod challenge_flow_count;
mod challenge_success_count;
mod frictionless_flow_count;
mod frictionless_success_count;

use authentication_attempt_count::AuthenticationAttemptCount;
use authentication_count::AuthenticationCount;
use authentication_error_message::AuthenticationErrorMessage;
use authentication_exemption_approved_count::AuthenticationExemptionApprovedCount;
use authentication_exemption_requested_count::AuthenticationExemptionRequestedCount;
use authentication_funnel::AuthenticationFunnel;
use authentication_success_count::AuthenticationSuccessCount;
use challenge_attempt_count::ChallengeAttemptCount;
use challenge_flow_count::ChallengeFlowCount;
use challenge_success_count::ChallengeSuccessCount;
use frictionless_flow_count::FrictionlessFlowCount;
use frictionless_success_count::FrictionlessSuccessCount;

/// One row of an authentication-events metrics query result. Dimension fields
/// are `Some` only when the corresponding column was selected/grouped by.
#[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)]
pub struct AuthEventMetricRow {
    // Aggregate count for the bucket.
    pub count: Option<i64>,
    // 3DS authentication dimensions.
    pub authentication_status: Option<DBEnumWrapper<storage_enums::AuthenticationStatus>>,
    pub trans_status: Option<DBEnumWrapper<storage_enums::TransactionStatus>>,
    pub authentication_type: Option<DBEnumWrapper<storage_enums::DecoupledAuthenticationType>>,
    pub error_message: Option<String>,
    pub authentication_connector: Option<DBEnumWrapper<storage_enums::AuthenticationConnectors>>,
    pub message_version: Option<String>,
    pub acs_reference_number: Option<String>,
    pub platform: Option<String>,
    pub mcc: Option<String>,
    pub currency: Option<DBEnumWrapper<storage_enums::Currency>>,
    pub merchant_country: Option<String>,
    pub billing_country: Option<String>,
    pub shipping_country: Option<String>,
    pub issuer_country: Option<String>,
    pub earliest_supported_version: Option<String>,
    pub latest_supported_version: Option<String>,
    pub whitelist_decision: Option<bool>,
    // Device / browser dimensions.
    pub device_manufacturer: Option<String>,
    pub device_type: Option<String>,
    pub device_brand: Option<String>,
    pub device_os: Option<String>,
    pub device_display: Option<String>,
    pub browser_name: Option<String>,
    pub browser_version: Option<String>,
    pub issuer_id: Option<String>,
    pub scheme_name: Option<String>,
    // Exemption dimensions.
    pub exemption_requested: Option<bool>,
    pub exemption_accepted: Option<bool>,
    // MIN/MAX `created_at` within the bucket.
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub start_bucket: Option<PrimitiveDateTime>,
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub end_bucket: Option<PrimitiveDateTime>,
}

/// Marker trait: a data source that can load [`AuthEventMetricRow`]s.
pub trait AuthEventMetricAnalytics: LoadRow<AuthEventMetricRow> {}

/// A single authentication-events metric loadable from a data source `T`.
#[async_trait::async_trait]
pub trait AuthEventMetric<T>
where
    T: AnalyticsDataSource + AuthEventMetricAnalytics,
{
    /// Loads this metric for `auth`, grouped by `dimensions` (and by
    /// `granularity` buckets when provided), restricted to `filters` and
    /// `time_range`.
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>>;
}

#[async_trait::async_trait]
impl<T> AuthEventMetric<T> for AuthEventMetrics
where
    T: AnalyticsDataSource + AuthEventMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Dispatches to the concrete implementation for this metric variant.
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
        match self {
            Self::AuthenticationCount => {
                AuthenticationCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::AuthenticationAttemptCount => {
                AuthenticationAttemptCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::AuthenticationSuccessCount => {
                AuthenticationSuccessCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::ChallengeFlowCount => {
                ChallengeFlowCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::ChallengeAttemptCount => {
                ChallengeAttemptCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::ChallengeSuccessCount => {
                ChallengeSuccessCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::FrictionlessFlowCount => {
                FrictionlessFlowCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::FrictionlessSuccessCount => {
                FrictionlessSuccessCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::AuthenticationErrorMessage => {
                AuthenticationErrorMessage
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::AuthenticationFunnel => {
                AuthenticationFunnel
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::AuthenticationExemptionApprovedCount => {
                AuthenticationExemptionApprovedCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
            Self::AuthenticationExemptionRequestedCount => {
                AuthenticationExemptionRequestedCount
                    .load_metrics(auth, dimensions, filters, granularity, time_range, pool)
                    .await
            }
        }
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_7236319839503172795
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/sankey.rs // Contains: 1 structs, 0 enums use common_enums::AuthenticationStatus; use common_utils::{ errors::ParsingError, types::{authentication::AuthInfo, TimeRange}, }; use error_stack::ResultExt; use router_env::logger; use crate::{ clickhouse::ClickhouseClient, query::{Aggregate, QueryBuilder, QueryFilter}, types::{AnalyticsCollection, MetricsError, MetricsResult}, }; #[derive(Debug, serde::Deserialize, serde::Serialize)] pub struct SankeyRow { pub count: i64, pub authentication_status: Option<AuthenticationStatus>, pub exemption_requested: Option<bool>, pub exemption_accepted: Option<bool>, } impl TryInto<SankeyRow> for serde_json::Value { type Error = error_stack::Report<ParsingError>; fn try_into(self) -> Result<SankeyRow, Self::Error> { logger::debug!("Parsing SankeyRow from {:?}", self); serde_json::from_value(self).change_context(ParsingError::StructParseFailure( "Failed to parse Sankey in clickhouse results", )) } } pub async fn get_sankey_data( clickhouse_client: &ClickhouseClient, auth: &AuthInfo, time_range: &TimeRange, ) -> MetricsResult<Vec<SankeyRow>> { let mut query_builder = QueryBuilder::<ClickhouseClient>::new(AnalyticsCollection::Authentications); query_builder .add_select_column(Aggregate::<String>::Count { field: None, alias: Some("count"), }) .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("exemption_requested") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("exemption_accepted") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_select_column("authentication_status") .change_context(MetricsError::QueryBuildingError)?; auth.set_filter_clause(&mut query_builder) .change_context(MetricsError::QueryBuildingError)?; time_range .set_filter_clause(&mut query_builder) .change_context(MetricsError::QueryBuildingError)?; query_builder 
.add_group_by_clause("exemption_requested") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_group_by_clause("exemption_accepted") .change_context(MetricsError::QueryBuildingError)?; query_builder .add_group_by_clause("authentication_status") .change_context(MetricsError::QueryBuildingError)?; query_builder .execute_query::<SankeyRow, _>(clickhouse_client) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(Ok) .collect() }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/sankey.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2834887697820958170
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/filters.rs // Contains: 1 structs, 0 enums

use api_models::analytics::{auth_events::AuthEventDimensions, Granularity, TimeRange};
use common_enums::{Currency, DecoupledAuthenticationType};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums::{AuthenticationConnectors, AuthenticationStatus, TransactionStatus};
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
    types::{
        AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult,
        LoadRow,
    },
};

/// Marker trait: a data source that can load [`AuthEventFilterRow`]s.
pub trait AuthEventFilterAnalytics: LoadRow<AuthEventFilterRow> {}

/// Fetches the distinct values of a single auth-events `dimension` for the
/// scope in `auth` within `time_range`, for populating filter dropdowns.
///
/// Builds `SELECT DISTINCT <dimension> FROM authentications` filtered by the
/// time range and the auth scope; one [`AuthEventFilterRow`] per distinct
/// value (only the selected dimension's field will be populated).
pub async fn get_auth_events_filter_for_dimension<T>(
    dimension: AuthEventDimensions,
    auth: &AuthInfo,
    time_range: &TimeRange,
    pool: &T,
) -> FiltersResult<Vec<AuthEventFilterRow>>
where
    T: AnalyticsDataSource + AuthEventFilterAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    let mut query_builder: QueryBuilder<T> =
        QueryBuilder::new(AnalyticsCollection::Authentications);

    query_builder.add_select_column(dimension).switch()?;
    time_range
        .set_filter_clause(&mut query_builder)
        .attach_printable("Error filtering time range")
        .switch()?;

    // DISTINCT so each filter value appears exactly once.
    query_builder.set_distinct();

    auth.set_filter_clause(&mut query_builder).switch()?;

    query_builder
        .execute_query::<AuthEventFilterRow, _>(pool)
        .await
        .change_context(FiltersError::QueryBuildingError)?
        .change_context(FiltersError::QueryExecutionFailure)
}

/// One distinct-value row returned by [`get_auth_events_filter_for_dimension`];
/// only the field matching the queried dimension is expected to be `Some`.
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct AuthEventFilterRow {
    pub authentication_status: Option<DBEnumWrapper<AuthenticationStatus>>,
    pub trans_status: Option<DBEnumWrapper<TransactionStatus>>,
    pub authentication_type: Option<DBEnumWrapper<DecoupledAuthenticationType>>,
    pub error_message: Option<String>,
    pub authentication_connector: Option<DBEnumWrapper<AuthenticationConnectors>>,
    pub message_version: Option<String>,
    pub acs_reference_number: Option<String>,
    pub platform: Option<String>,
    pub mcc: Option<String>,
    pub currency: Option<DBEnumWrapper<Currency>>,
    pub merchant_country: Option<String>,
    pub billing_country: Option<String>,
    pub shipping_country: Option<String>,
    pub issuer_country: Option<String>,
    pub earliest_supported_version: Option<String>,
    pub latest_supported_version: Option<String>,
    pub whitelist_decision: Option<bool>,
    pub device_manufacturer: Option<String>,
    pub device_type: Option<String>,
    pub device_brand: Option<String>,
    pub device_os: Option<String>,
    pub device_display: Option<String>,
    pub browser_name: Option<String>,
    pub browser_version: Option<String>,
    pub issuer_id: Option<String>,
    pub scheme_name: Option<String>,
    pub exemption_requested: Option<bool>,
    pub exemption_accepted: Option<bool>,
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2890663994028310280
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/accumulator.rs // Contains: 3 structs, 0 enums use api_models::analytics::auth_events::AuthEventMetricsBucketValue; use super::metrics::AuthEventMetricRow; #[derive(Debug, Default)] pub struct AuthEventMetricsAccumulator { pub authentication_count: CountAccumulator, pub authentication_attempt_count: CountAccumulator, pub authentication_error_message: AuthenticationErrorMessageAccumulator, pub authentication_success_count: CountAccumulator, pub challenge_flow_count: CountAccumulator, pub challenge_attempt_count: CountAccumulator, pub challenge_success_count: CountAccumulator, pub frictionless_flow_count: CountAccumulator, pub frictionless_success_count: CountAccumulator, pub authentication_funnel: CountAccumulator, pub authentication_exemption_approved_count: CountAccumulator, pub authentication_exemption_requested_count: CountAccumulator, } #[derive(Debug, Default)] #[repr(transparent)] pub struct CountAccumulator { pub count: Option<i64>, } #[derive(Debug, Default)] pub struct AuthenticationErrorMessageAccumulator { pub count: Option<i64>, } pub trait AuthEventMetricAccumulator { type MetricOutput; fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow); fn collect(self) -> Self::MetricOutput; } impl AuthEventMetricAccumulator for CountAccumulator { type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow) { self.count = match (self.count, metrics.count) { (None, None) => None, (None, i @ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.count.and_then(|i| u64::try_from(i).ok()) } } impl AuthEventMetricAccumulator for AuthenticationErrorMessageAccumulator { type MetricOutput = Option<u64>; #[inline] fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow) { self.count = match (self.count, metrics.count) { (None, None) => None, (None, i 
@ Some(_)) | (i @ Some(_), None) => i, (Some(a), Some(b)) => Some(a + b), } } #[inline] fn collect(self) -> Self::MetricOutput { self.count.and_then(|i| u64::try_from(i).ok()) } } impl AuthEventMetricsAccumulator { pub fn collect(self) -> AuthEventMetricsBucketValue { AuthEventMetricsBucketValue { authentication_count: self.authentication_count.collect(), authentication_attempt_count: self.authentication_attempt_count.collect(), authentication_success_count: self.authentication_success_count.collect(), challenge_flow_count: self.challenge_flow_count.collect(), challenge_attempt_count: self.challenge_attempt_count.collect(), challenge_success_count: self.challenge_success_count.collect(), frictionless_flow_count: self.frictionless_flow_count.collect(), frictionless_success_count: self.frictionless_success_count.collect(), error_message_count: self.authentication_error_message.collect(), authentication_funnel: self.authentication_funnel.collect(), authentication_exemption_approved_count: self .authentication_exemption_approved_count .collect(), authentication_exemption_requested_count: self .authentication_exemption_requested_count .collect(), } } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/accumulator.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 3, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6117222443692096086
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/authentication_exemption_approved_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct AuthenticationExemptionApprovedCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for AuthenticationExemptionApprovedCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause(AuthEventDimensions::ExemptionAccepted, true) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; 
time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_exemption_approved_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_6180153090871274173
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/frictionless_flow_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_enums::DecoupledAuthenticationType; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct FrictionlessFlowCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for FrictionlessFlowCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Frictionless, ) .switch()?; filters.set_filter_clause(&mut 
query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/frictionless_flow_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6564656657405178453
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/frictionless_success_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_enums::{AuthenticationStatus, DecoupledAuthenticationType}; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct FrictionlessSuccessCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for FrictionlessSuccessCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Frictionless, ) .switch()?; 
query_builder .add_filter_clause("authentication_status", AuthenticationStatus::Success) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< 
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/frictionless_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-3672786378841737527
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/challenge_flow_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_enums::DecoupledAuthenticationType; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct ChallengeFlowCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for ChallengeFlowCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Challenge, ) .switch()?; filters.set_filter_clause(&mut 
query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/challenge_flow_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4497780948854067089
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/authentication_exemption_requested_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct AuthenticationExemptionRequestedCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for AuthenticationExemptionRequestedCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause(AuthEventDimensions::ExemptionRequested, true) .switch()?; filters.set_filter_clause(&mut 
query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_exemption_requested_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-818517781602396994
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/authentication_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct AuthenticationCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for AuthenticationCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut 
query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_9058039672712944928
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/auth_events/metrics/challenge_attempt_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_enums::{AuthenticationStatus, DecoupledAuthenticationType}; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::AuthEventMetricRow; use crate::{ query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, AuthInfo, }; #[derive(Default)] pub(super) struct ChallengeAttemptCount; #[async_trait::async_trait] impl<T> super::AuthEventMetric<T> for ChallengeAttemptCount where T: AnalyticsDataSource + super::AuthEventMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Challenge, ) .switch()?; 
query_builder .add_filter_in_range_clause( "authentication_status", &[AuthenticationStatus::Success, AuthenticationStatus::Failed], ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) 
.collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/challenge_attempt_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_2811475158131150291
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/auth_events/metrics/authentication_attempt_count.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_enums::AuthenticationStatus;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::AuthEventMetricRow;
use crate::{
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
    AuthInfo,
};

/// Metric that counts authentication rows whose `authentication_status` is
/// `Success` or `Failed` (see the `add_filter_in_range_clause` below).
#[derive(Default)]
pub(super) struct AuthenticationAttemptCount;

#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationAttemptCount
where
    T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes the aggregation query for this metric, then maps
    /// every result row to its bucket identifier.
    ///
    /// Query shape: SELECT the requested dimensions, a count aggregate
    /// aliased `count`, MIN(created_at) as `start_bucket` and
    /// MAX(created_at) as `end_bucket` from the `Authentications`
    /// collection; filtered by status, the caller's filters, the time range
    /// and `auth`; grouped by the dimensions (plus granularity, when given).
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::Authentications);
        // Select every dimension the caller asked to slice on.
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // Row count (no specific field), surfaced to the row type as `count`.
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Earliest / latest `created_at` inside each group; used below to
        // clip the bucket to the requested granularity window.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        // Restrict to rows whose status is one of Success / Failed — i.e.
        // authentications that were actually attempted.
        query_builder
            .add_filter_in_range_clause(
                "authentication_status",
                &[AuthenticationStatus::Success, AuthenticationStatus::Failed],
            )
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Group by the same dimensions that were selected above.
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<AuthEventMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            // Pair each row with its bucket identifier. NB: the inner
            // `map(|i| i.0)` closures shadow the row binding `i` with the
            // DB enum wrapper being unwrapped.
            .map(|i| {
                Ok((
                    AuthEventMetricsBucketIdentifier::new(
                        i.authentication_status.as_ref().map(|i| i.0),
                        i.trans_status.as_ref().map(|i| i.0.clone()),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.error_message.clone(),
                        i.authentication_connector.as_ref().map(|i| i.0),
                        i.message_version.clone(),
                        i.acs_reference_number.clone(),
                        i.mcc.clone(),
                        i.currency.as_ref().map(|i| i.0),
                        i.merchant_country.clone(),
                        i.billing_country.clone(),
                        i.shipping_country.clone(),
                        i.issuer_country.clone(),
                        i.earliest_supported_version.clone(),
                        i.latest_supported_version.clone(),
                        i.whitelist_decision,
                        i.device_manufacturer.clone(),
                        i.device_type.clone(),
                        i.device_brand.clone(),
                        i.device_os.clone(),
                        i.device_display.clone(),
                        i.browser_name.clone(),
                        i.browser_version.clone(),
                        i.issuer_id.clone(),
                        i.scheme_name.clone(),
                        i.exemption_requested,
                        i.exemption_accepted,
                        // Clip the bucket to the granularity window when one
                        // was requested; otherwise fall back to the caller's
                        // time range.
                        TimeRange {
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_attempt_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-6632159993566490075
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/auth_events/metrics/authentication_success_count.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_enums::AuthenticationStatus;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::AuthEventMetricRow;
use crate::{
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
    AuthInfo,
};

/// Metric that counts authentication rows whose `authentication_status` is
/// `Success` (see the `add_filter_clause` below).
#[derive(Default)]
pub(super) struct AuthenticationSuccessCount;

#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationSuccessCount
where
    T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes the aggregation query for this metric, then maps
    /// every result row to its bucket identifier.
    ///
    /// Query shape: SELECT the requested dimensions, a count aggregate
    /// aliased `count`, MIN(created_at) as `start_bucket` and
    /// MAX(created_at) as `end_bucket` from the `Authentications`
    /// collection; filtered to successful rows plus the caller's filters,
    /// time range and `auth`; grouped by the dimensions (plus granularity,
    /// when given).
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::Authentications);
        // Select every dimension the caller asked to slice on.
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // Row count (no specific field), surfaced to the row type as `count`.
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Earliest / latest `created_at` inside each group; used below to
        // clip the bucket to the requested granularity window.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        // Only successful authentications contribute to this metric.
        query_builder
            .add_filter_clause("authentication_status", AuthenticationStatus::Success)
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Group by the same dimensions that were selected above.
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<AuthEventMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            // Pair each row with its bucket identifier. NB: the inner
            // `map(|i| i.0)` closures shadow the row binding `i` with the
            // DB enum wrapper being unwrapped.
            .map(|i| {
                Ok((
                    AuthEventMetricsBucketIdentifier::new(
                        i.authentication_status.as_ref().map(|i| i.0),
                        i.trans_status.as_ref().map(|i| i.0.clone()),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.error_message.clone(),
                        i.authentication_connector.as_ref().map(|i| i.0),
                        i.message_version.clone(),
                        i.acs_reference_number.clone(),
                        i.mcc.clone(),
                        i.currency.as_ref().map(|i| i.0),
                        i.merchant_country.clone(),
                        i.billing_country.clone(),
                        i.shipping_country.clone(),
                        i.issuer_country.clone(),
                        i.earliest_supported_version.clone(),
                        i.latest_supported_version.clone(),
                        i.whitelist_decision,
                        i.device_manufacturer.clone(),
                        i.device_type.clone(),
                        i.device_brand.clone(),
                        i.device_os.clone(),
                        i.device_display.clone(),
                        i.browser_name.clone(),
                        i.browser_version.clone(),
                        i.issuer_id.clone(),
                        i.scheme_name.clone(),
                        i.exemption_requested,
                        i.exemption_accepted,
                        // Clip the bucket to the granularity window when one
                        // was requested; otherwise fall back to the caller's
                        // time range.
                        TimeRange {
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3383320771024036910
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/auth_events/metrics/authentication_error_message.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_enums::AuthenticationStatus;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::AuthEventMetricRow;
use crate::{
    query::{
        Aggregate, FilterTypes, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket,
        ToSql, Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
    AuthInfo,
};

/// Metric over failed authentications that carry an error message,
/// aggregated as `sum(sign_flag)` and ordered by descending count.
#[derive(Default)]
pub(super) struct AuthenticationErrorMessage;

#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationErrorMessage
where
    T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes the aggregation query for this metric, then maps
    /// every result row to its bucket identifier.
    ///
    /// Query shape: SELECT the requested dimensions, `sum(sign_flag)` as
    /// `count`, MIN(created_at) as `start_bucket` and MAX(created_at) as
    /// `end_bucket` from the `Authentications` collection; restricted to
    /// Failed rows with a non-null error message; ordered by `count`
    /// descending.
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::Authentications);
        // Select every dimension the caller asked to slice on.
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // Raw SQL fragment: sums sign_flag rather than counting rows —
        // presumably to net out insert/retract row pairs; TODO confirm
        // against the underlying table's sign_flag semantics.
        query_builder
            .add_select_column("sum(sign_flag) AS count")
            .switch()?;
        // Earliest / latest `created_at` inside each group; used below to
        // clip the bucket to the requested granularity window.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        // Only failed authentications are relevant for error messages.
        query_builder
            .add_filter_clause("authentication_status", AuthenticationStatus::Failed)
            .switch()?;
        // IS NOT NULL on the error-message column. The "NULL" string
        // argument looks like a placeholder that IsNotNull ignores —
        // NOTE(review): confirm against `add_custom_filter_clause`.
        query_builder
            .add_custom_filter_clause(
                AuthEventDimensions::ErrorMessage,
                "NULL",
                FilterTypes::IsNotNull,
            )
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Group by the same dimensions that were selected above.
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        // NOTE(review): the ORDER BY is registered before the granularity
        // GROUP BY — assumed the builder places clauses correctly in the
        // final SQL regardless of registration order; confirm.
        query_builder
            .add_order_by_clause("count", Order::Descending)
            .attach_printable("Error adding order by clause")
            .switch()?;
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<AuthEventMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            // Pair each row with its bucket identifier. NB: the inner
            // `map(|i| i.0)` closures shadow the row binding `i` with the
            // DB enum wrapper being unwrapped.
            .map(|i| {
                Ok((
                    AuthEventMetricsBucketIdentifier::new(
                        i.authentication_status.as_ref().map(|i| i.0),
                        i.trans_status.as_ref().map(|i| i.0.clone()),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.error_message.clone(),
                        i.authentication_connector.as_ref().map(|i| i.0),
                        i.message_version.clone(),
                        i.acs_reference_number.clone(),
                        i.mcc.clone(),
                        i.currency.as_ref().map(|i| i.0),
                        i.merchant_country.clone(),
                        i.billing_country.clone(),
                        i.shipping_country.clone(),
                        i.issuer_country.clone(),
                        i.earliest_supported_version.clone(),
                        i.latest_supported_version.clone(),
                        i.whitelist_decision,
                        i.device_manufacturer.clone(),
                        i.device_type.clone(),
                        i.device_brand.clone(),
                        i.device_os.clone(),
                        i.device_display.clone(),
                        i.browser_name.clone(),
                        i.browser_version.clone(),
                        i.issuer_id.clone(),
                        i.scheme_name.clone(),
                        i.exemption_requested,
                        i.exemption_accepted,
                        // Clip the bucket to the granularity window when one
                        // was requested; otherwise fall back to the caller's
                        // time range.
                        TimeRange {
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_error_message.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-1512556826783390689
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/auth_events/metrics/authentication_funnel.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::AuthEventMetricRow;
use crate::{
    query::{
        Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
        Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
    AuthInfo,
};

/// Metric that counts authentication rows whose transaction status is
/// non-null (see the IsNotNull filter below).
#[derive(Default)]
pub(super) struct AuthenticationFunnel;

#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationFunnel
where
    T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes the aggregation query for this metric, then maps
    /// every result row to its bucket identifier.
    ///
    /// Query shape: SELECT the requested dimensions, a count aggregate
    /// aliased `count`, MIN(created_at) as `start_bucket` and
    /// MAX(created_at) as `end_bucket` from the `Authentications`
    /// collection; restricted to rows with a non-null transaction status.
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::Authentications);
        // Select every dimension the caller asked to slice on.
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // Row count (no specific field), surfaced to the row type as `count`.
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Earliest / latest `created_at` inside each group; used below to
        // clip the bucket to the requested granularity window.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        // IS NOT NULL on the transaction-status column. The "NULL" string
        // argument looks like a placeholder that IsNotNull ignores —
        // NOTE(review): confirm against `add_custom_filter_clause`.
        query_builder
            .add_custom_filter_clause(
                AuthEventDimensions::TransactionStatus,
                "NULL",
                FilterTypes::IsNotNull,
            )
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Group by the same dimensions that were selected above.
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<AuthEventMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            // Pair each row with its bucket identifier. NB: the inner
            // `map(|i| i.0)` closures shadow the row binding `i` with the
            // DB enum wrapper being unwrapped.
            .map(|i| {
                Ok((
                    AuthEventMetricsBucketIdentifier::new(
                        i.authentication_status.as_ref().map(|i| i.0),
                        i.trans_status.as_ref().map(|i| i.0.clone()),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.error_message.clone(),
                        i.authentication_connector.as_ref().map(|i| i.0),
                        i.message_version.clone(),
                        i.acs_reference_number.clone(),
                        i.mcc.clone(),
                        i.currency.as_ref().map(|i| i.0),
                        i.merchant_country.clone(),
                        i.billing_country.clone(),
                        i.shipping_country.clone(),
                        i.issuer_country.clone(),
                        i.earliest_supported_version.clone(),
                        i.latest_supported_version.clone(),
                        i.whitelist_decision,
                        i.device_manufacturer.clone(),
                        i.device_type.clone(),
                        i.device_brand.clone(),
                        i.device_os.clone(),
                        i.device_display.clone(),
                        i.browser_name.clone(),
                        i.browser_version.clone(),
                        i.issuer_id.clone(),
                        i.scheme_name.clone(),
                        i.exemption_requested,
                        i.exemption_accepted,
                        // Clip the bucket to the granularity window when one
                        // was requested; otherwise fall back to the caller's
                        // time range.
                        TimeRange {
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/authentication_funnel.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-1860376238839868953
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/auth_events/metrics/challenge_success_count.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_enums::{AuthenticationStatus, DecoupledAuthenticationType};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::AuthEventMetricRow;
use crate::{
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
    AuthInfo,
};

/// Metric that counts successful challenge-flow authentications: rows with
/// `authentication_status == Success` and
/// `authentication_type == Challenge` (see the filters below).
#[derive(Default)]
pub(super) struct ChallengeSuccessCount;

#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for ChallengeSuccessCount
where
    T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes the aggregation query for this metric, then maps
    /// every result row to its bucket identifier.
    ///
    /// Query shape: SELECT the requested dimensions, a count aggregate
    /// aliased `count`, MIN(created_at) as `start_bucket` and
    /// MAX(created_at) as `end_bucket` from the `Authentications`
    /// collection; restricted to successful challenge authentications.
    async fn load_metrics(
        &self,
        auth: &AuthInfo,
        dimensions: &[AuthEventDimensions],
        filters: &AuthEventFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::Authentications);
        // Select every dimension the caller asked to slice on.
        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }
        // Row count (no specific field), surfaced to the row type as `count`.
        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Earliest / latest `created_at` inside each group; used below to
        // clip the bucket to the requested granularity window.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;
        // Restrict to successful rows…
        query_builder
            .add_filter_clause("authentication_status", AuthenticationStatus::Success)
            .switch()?;
        // …that went through the challenge flow.
        query_builder
            .add_filter_clause(
                "authentication_type",
                DecoupledAuthenticationType::Challenge,
            )
            .switch()?;
        filters.set_filter_clause(&mut query_builder).switch()?;
        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;
        auth.set_filter_clause(&mut query_builder).switch()?;
        // Group by the same dimensions that were selected above.
        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }
        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }
        query_builder
            .execute_query::<AuthEventMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            // Pair each row with its bucket identifier. NB: the inner
            // `map(|i| i.0)` closures shadow the row binding `i` with the
            // DB enum wrapper being unwrapped.
            .map(|i| {
                Ok((
                    AuthEventMetricsBucketIdentifier::new(
                        i.authentication_status.as_ref().map(|i| i.0),
                        i.trans_status.as_ref().map(|i| i.0.clone()),
                        i.authentication_type.as_ref().map(|i| i.0),
                        i.error_message.clone(),
                        i.authentication_connector.as_ref().map(|i| i.0),
                        i.message_version.clone(),
                        i.acs_reference_number.clone(),
                        i.mcc.clone(),
                        i.currency.as_ref().map(|i| i.0),
                        i.merchant_country.clone(),
                        i.billing_country.clone(),
                        i.shipping_country.clone(),
                        i.issuer_country.clone(),
                        i.earliest_supported_version.clone(),
                        i.latest_supported_version.clone(),
                        i.whitelist_decision,
                        i.device_manufacturer.clone(),
                        i.device_type.clone(),
                        i.device_brand.clone(),
                        i.device_os.clone(),
                        i.device_display.clone(),
                        i.browser_name.clone(),
                        i.browser_version.clone(),
                        i.issuer_id.clone(),
                        i.scheme_name.clone(),
                        i.exemption_requested,
                        i.exemption_accepted,
                        // Clip the bucket to the granularity window when one
                        // was requested; otherwise fall back to the caller's
                        // time range.
                        TimeRange {
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/auth_events/metrics/challenge_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3541974659523562278
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/refunds/distribution.rs
// Contains: 1 structs, 0 enums

use api_models::analytics::{
    refunds::{
        RefundDimensions, RefundDistributions, RefundFilters, RefundMetricsBucketIdentifier,
        RefundType,
    },
    Granularity, RefundDistributionBody, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;

use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};

mod sessionized_distribution;

/// One row of a refund-distribution query result, deserialized straight
/// from the data source.
#[derive(Debug, PartialEq, Eq, serde::Deserialize)]
pub struct RefundDistributionRow {
    pub currency: Option<DBEnumWrapper<storage_enums::Currency>>,
    pub refund_status: Option<DBEnumWrapper<storage_enums::RefundStatus>>,
    pub connector: Option<String>,
    pub refund_type: Option<DBEnumWrapper<RefundType>>,
    pub profile_id: Option<String>,
    // Aggregate amount for the bucket, if the query produced one.
    pub total: Option<bigdecimal::BigDecimal>,
    // Row count for the bucket, if the query produced one.
    pub count: Option<i64>,
    pub refund_reason: Option<String>,
    pub refund_error_message: Option<String>,
    // Bucket boundaries, serialized as optional ISO-8601 timestamps.
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub start_bucket: Option<PrimitiveDateTime>,
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub end_bucket: Option<PrimitiveDateTime>,
}

/// Marker trait: a data source that can load `RefundDistributionRow`s.
pub trait RefundDistributionAnalytics: LoadRow<RefundDistributionRow> {}

/// A refund distribution that can be loaded from data source `T`.
#[async_trait::async_trait]
pub trait RefundDistribution<T>
where
    T: AnalyticsDataSource + RefundDistributionAnalytics,
{
    #[allow(clippy::too_many_arguments)]
    async fn load_distribution(
        &self,
        distribution: &RefundDistributionBody,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: &Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>>;
}

/// Dispatch: routes each `RefundDistributions` variant to its concrete
/// sessionized implementation.
#[async_trait::async_trait]
impl<T> RefundDistribution<T> for RefundDistributions
where
    T: AnalyticsDataSource + RefundDistributionAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    async fn load_distribution(
        &self,
        distribution: &RefundDistributionBody,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: &Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>> {
        match self {
            Self::SessionizedRefundReason => {
                sessionized_distribution::RefundReason
                    .load_distribution(
                        distribution,
                        dimensions,
                        auth,
                        filters,
                        granularity,
                        time_range,
                        pool,
                    )
                    .await
            }
            Self::SessionizedRefundErrorMessage => {
                sessionized_distribution::RefundErrorMessage
                    .load_distribution(
                        distribution,
                        dimensions,
                        auth,
                        filters,
                        granularity,
                        time_range,
                        pool,
                    )
                    .await
            }
        }
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/distribution.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_3630945455526116503
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/refunds/metrics.rs
// Contains: 1 structs, 0 enums

use api_models::analytics::{
    refunds::{
        RefundDimensions, RefundFilters, RefundMetrics, RefundMetricsBucketIdentifier, RefundType,
    },
    Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;

mod refund_count;
mod refund_processed_amount;
mod refund_success_count;
mod refund_success_rate;
mod sessionized_metrics;

use std::collections::HashSet;

use refund_count::RefundCount;
use refund_processed_amount::RefundProcessedAmount;
use refund_success_count::RefundSuccessCount;
use refund_success_rate::RefundSuccessRate;

use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};

/// One row of a refund-metric query result, deserialized straight from the
/// data source.
#[derive(Debug, Eq, PartialEq, serde::Deserialize, Hash)]
pub struct RefundMetricRow {
    pub currency: Option<DBEnumWrapper<storage_enums::Currency>>,
    pub refund_status: Option<DBEnumWrapper<storage_enums::RefundStatus>>,
    pub connector: Option<String>,
    pub refund_type: Option<DBEnumWrapper<RefundType>>,
    pub profile_id: Option<String>,
    pub refund_reason: Option<String>,
    pub refund_error_message: Option<String>,
    // Aggregate amount for the bucket, if the query produced one.
    pub total: Option<bigdecimal::BigDecimal>,
    // Row count for the bucket, if the query produced one.
    pub count: Option<i64>,
    // Bucket boundaries, serialized as optional ISO-8601 timestamps.
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub start_bucket: Option<PrimitiveDateTime>,
    #[serde(with = "common_utils::custom_serde::iso8601::option")]
    pub end_bucket: Option<PrimitiveDateTime>,
}

/// Marker trait: a data source that can load `RefundMetricRow`s.
pub trait RefundMetricAnalytics: LoadRow<RefundMetricRow> {}

/// A refund metric that can be loaded from data source `T`.
#[async_trait::async_trait]
pub trait RefundMetric<T>
where
    T: AnalyticsDataSource + RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>>;
}

/// Dispatch: routes each `RefundMetrics` variant to its concrete metric
/// implementation (plain or sessionized).
#[async_trait::async_trait]
impl<T> RefundMetric<T> for RefundMetrics
where
    T: AnalyticsDataSource + RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> {
        match self {
            Self::RefundSuccessRate => {
                RefundSuccessRate::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::RefundCount => {
                RefundCount::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::RefundSuccessCount => {
                RefundSuccessCount::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::RefundProcessedAmount => {
                RefundProcessedAmount::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::SessionizedRefundSuccessRate => {
                sessionized_metrics::RefundSuccessRate::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::SessionizedRefundCount => {
                sessionized_metrics::RefundCount::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::SessionizedRefundSuccessCount => {
                sessionized_metrics::RefundSuccessCount::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::SessionizedRefundProcessedAmount => {
                sessionized_metrics::RefundProcessedAmount::default()
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::SessionizedRefundReason => {
                sessionized_metrics::RefundReason
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
            Self::SessionizedRefundErrorMessage => {
                sessionized_metrics::RefundErrorMessage
                    .load_metrics(dimensions, auth, filters, granularity, time_range, pool)
                    .await
            }
        }
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-619791984808787289
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/filters.rs // Contains: 1 structs, 0 enums use api_models::analytics::{ refunds::{RefundDimensions, RefundType}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums::{Currency, RefundStatus}; use error_stack::ResultExt; use time::PrimitiveDateTime; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window}, types::{ AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult, LoadRow, }, }; pub trait RefundFilterAnalytics: LoadRow<RefundFilterRow> {} pub async fn get_refund_filter_for_dimension<T>( dimension: RefundDimensions, auth: &AuthInfo, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<RefundFilterRow>> where T: AnalyticsDataSource + RefundFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Refund); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; query_builder.set_distinct(); query_builder .execute_query::<RefundFilterRow, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) } #[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)] pub struct RefundFilterRow { pub currency: Option<DBEnumWrapper<Currency>>, pub refund_status: Option<DBEnumWrapper<RefundStatus>>, pub connector: Option<String>, pub refund_type: Option<DBEnumWrapper<RefundType>>, pub profile_id: Option<String>, pub refund_reason: Option<String>, pub refund_error_message: Option<String>, }
{ "crate": "analytics", "file": "crates/analytics/src/refunds/filters.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4525313233661887376
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/accumulator.rs
// Contains: 9 structs, 0 enums

use api_models::analytics::refunds::{
    ErrorMessagesResult, ReasonsResult, RefundMetricsBucketValue,
};
use bigdecimal::ToPrimitive;
use diesel_models::enums as storage_enums;

use super::{distribution::RefundDistributionRow, metrics::RefundMetricRow};

/// Aggregates all per-bucket refund metrics; [`RefundMetricsAccumulator::collect`]
/// folds the state into one `RefundMetricsBucketValue`.
#[derive(Debug, Default)]
pub struct RefundMetricsAccumulator {
    pub refund_success_rate: SuccessRateAccumulator,
    pub refund_count: CountAccumulator,
    pub refund_success: CountAccumulator,
    pub processed_amount: RefundProcessedAmountAccumulator,
    pub refund_reason: RefundReasonAccumulator,
    pub refund_reason_distribution: RefundReasonDistributionAccumulator,
    pub refund_error_message: RefundReasonAccumulator,
    pub refund_error_message_distribution: RefundErrorMessageDistributionAccumulator,
}

/// One (reason, count, total) entry used to compute a percentage share.
#[derive(Debug, Default)]
pub struct RefundReasonDistributionRow {
    pub count: i64,
    pub total: i64,
    pub refund_reason: String,
}

#[derive(Debug, Default)]
pub struct RefundReasonDistributionAccumulator {
    pub refund_reason_vec: Vec<RefundReasonDistributionRow>,
}

/// One (error message, count, total) entry used to compute a percentage share.
#[derive(Debug, Default)]
pub struct RefundErrorMessageDistributionRow {
    pub count: i64,
    pub total: i64,
    pub refund_error_message: String,
}

#[derive(Debug, Default)]
pub struct RefundErrorMessageDistributionAccumulator {
    pub refund_error_message_vec: Vec<RefundErrorMessageDistributionRow>,
}

/// Simple saturating-free u64 counter (used for reason / error-message counts).
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct RefundReasonAccumulator {
    pub count: u64,
}

/// Tracks successful vs. total refunds to derive a success percentage.
#[derive(Debug, Default)]
pub struct SuccessRateAccumulator {
    pub success: u32,
    pub total: u32,
}

/// Optional running sum of row counts; `None` means no bucket contributed.
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct CountAccumulator {
    pub count: Option<i64>,
}

/// Running sum of processed refund amount plus the contributing row count.
#[derive(Debug, Default)]
pub struct RefundProcessedAmountAccumulator {
    pub count: Option<i64>,
    pub total: Option<i64>,
}

/// Fold interface for metric rows: feed buckets in, then `collect` the output.
pub trait RefundMetricAccumulator {
    type MetricOutput;

    fn add_metrics_bucket(&mut self, metrics: &RefundMetricRow);

    fn collect(self) -> Self::MetricOutput;
}

/// Fold interface for distribution rows (reason / error-message shares).
pub trait RefundDistributionAccumulator {
    type DistributionOutput;

    fn add_distribution_bucket(&mut self, distribution: &RefundDistributionRow);

    fn collect(self) -> Self::DistributionOutput;
}

impl RefundDistributionAccumulator for RefundReasonDistributionAccumulator {
    type DistributionOutput = Option<Vec<ReasonsResult>>;

    fn add_distribution_bucket(&mut self, distribution: &RefundDistributionRow) {
        self.refund_reason_vec.push(RefundReasonDistributionRow {
            count: distribution.count.unwrap_or_default(),
            total: distribution
                .total
                .clone()
                .map(|i| i.to_i64().unwrap_or_default())
                .unwrap_or_default(),
            refund_reason: distribution.refund_reason.clone().unwrap_or_default(),
        })
    }

    fn collect(mut self) -> Self::DistributionOutput {
        if self.refund_reason_vec.is_empty() {
            None
        } else {
            // Sort descending by count so the most frequent reason comes first.
            self.refund_reason_vec.sort_by(|a, b| b.count.cmp(&a.count));
            let mut res: Vec<ReasonsResult> = Vec::new();
            for val in self.refund_reason_vec.into_iter() {
                // NOTE(review): the `?` on these u32 conversions makes the
                // ENTIRE distribution collapse to `None` if any single
                // count/total is negative or exceeds u32::MAX — confirm that
                // is intended rather than skipping the offending entry.
                let perc = f64::from(u32::try_from(val.count).ok()?) * 100.0
                    / f64::from(u32::try_from(val.total).ok()?);
                res.push(ReasonsResult {
                    reason: val.refund_reason,
                    count: val.count,
                    // Round the percentage to two decimal places.
                    percentage: (perc * 100.0).round() / 100.0,
                })
            }
            Some(res)
        }
    }
}

impl RefundDistributionAccumulator for RefundErrorMessageDistributionAccumulator {
    type DistributionOutput = Option<Vec<ErrorMessagesResult>>;

    fn add_distribution_bucket(&mut self, distribution: &RefundDistributionRow) {
        self.refund_error_message_vec
            .push(RefundErrorMessageDistributionRow {
                count: distribution.count.unwrap_or_default(),
                total: distribution
                    .total
                    .clone()
                    .map(|i| i.to_i64().unwrap_or_default())
                    .unwrap_or_default(),
                refund_error_message: distribution
                    .refund_error_message
                    .clone()
                    .unwrap_or_default(),
            })
    }

    fn collect(mut self) -> Self::DistributionOutput {
        if self.refund_error_message_vec.is_empty() {
            None
        } else {
            // Sort descending by count so the most frequent message comes first.
            self.refund_error_message_vec
                .sort_by(|a, b| b.count.cmp(&a.count));
            let mut res: Vec<ErrorMessagesResult> = Vec::new();
            for val in self.refund_error_message_vec.into_iter() {
                // NOTE(review): same all-or-nothing `?` behavior as the
                // reason distribution above — confirm intended.
                let perc = f64::from(u32::try_from(val.count).ok()?) * 100.0
                    / f64::from(u32::try_from(val.total).ok()?);
                res.push(ErrorMessagesResult {
                    error_message: val.refund_error_message,
                    count: val.count,
                    // Round the percentage to two decimal places.
                    percentage: (perc * 100.0).round() / 100.0,
                })
            }
            Some(res)
        }
    }
}

impl RefundMetricAccumulator for CountAccumulator {
    type MetricOutput = Option<u64>;
    #[inline]
    fn add_metrics_bucket(&mut self, metrics: &RefundMetricRow) {
        // Option-aware addition: None + x = x, Some(a) + Some(b) = Some(a + b).
        self.count = match (self.count, metrics.count) {
            (None, None) => None,
            (None, i @ Some(_)) | (i @ Some(_), None) => i,
            (Some(a), Some(b)) => Some(a + b),
        }
    }
    #[inline]
    fn collect(self) -> Self::MetricOutput {
        // Negative totals (shouldn't occur) become None rather than wrapping.
        self.count.and_then(|i| u64::try_from(i).ok())
    }
}

impl RefundMetricAccumulator for RefundProcessedAmountAccumulator {
    type MetricOutput = (Option<u64>, Option<u64>, Option<u64>);
    #[inline]
    fn add_metrics_bucket(&mut self, metrics: &RefundMetricRow) {
        self.total = match (
            self.total,
            metrics.total.as_ref().and_then(ToPrimitive::to_i64),
        ) {
            (None, None) => None,
            (None, i @ Some(_)) | (i @ Some(_), None) => i,
            (Some(a), Some(b)) => Some(a + b),
        };

        self.count = match (self.count, metrics.count) {
            (None, None) => None,
            (None, i @ Some(_)) | (i @ Some(_), None) => i,
            (Some(a), Some(b)) => Some(a + b),
        };
    }
    #[inline]
    fn collect(self) -> Self::MetricOutput {
        let total = u64::try_from(self.total.unwrap_or_default()).ok();
        let count = self.count.and_then(|i| u64::try_from(i).ok());
        // NOTE(review): third slot (amount in USD) is hard-coded to Some(0)
        // here — currency conversion presumably happens elsewhere; confirm.
        (total, count, Some(0))
    }
}

impl RefundMetricAccumulator for SuccessRateAccumulator {
    type MetricOutput = (Option<u32>, Option<u32>, Option<f64>);

    fn add_metrics_bucket(&mut self, metrics: &RefundMetricRow) {
        // Rows are grouped by refund_status: only Success rows feed `success`,
        // every row feeds `total`.
        if let Some(ref refund_status) = metrics.refund_status {
            if refund_status.as_ref() == &storage_enums::RefundStatus::Success {
                if let Some(success) = metrics
                    .count
                    .and_then(|success| u32::try_from(success).ok())
                {
                    self.success += success;
                }
            }
        };

        if let Some(total) = metrics.count.and_then(|total| u32::try_from(total).ok()) {
            self.total += total;
        }
    }

    fn collect(self) -> Self::MetricOutput {
        if self.total == 0 {
            // No refunds observed: every component is absent.
            (None, None, None)
        } else {
            let success = Some(self.success);
            let total = Some(self.total);
            let success_rate = match (success, total) {
                (Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
                _ => None,
            };
            (success, total, success_rate)
        }
    }
}

impl RefundMetricAccumulator for RefundReasonAccumulator {
    type MetricOutput = Option<u64>;

    fn add_metrics_bucket(&mut self, metrics: &RefundMetricRow) {
        // Negative counts (shouldn't occur) are silently skipped.
        if let Some(count) = metrics.count {
            if let Ok(count_u64) = u64::try_from(count) {
                self.count += count_u64;
            }
        }
    }

    fn collect(self) -> Self::MetricOutput {
        Some(self.count)
    }
}

impl RefundMetricsAccumulator {
    /// Folds every sub-accumulator into the API-facing bucket value.
    pub fn collect(self) -> RefundMetricsBucketValue {
        let (successful_refunds, total_refunds, refund_success_rate) =
            self.refund_success_rate.collect();
        let (refund_processed_amount, refund_processed_count, refund_processed_amount_in_usd) =
            self.processed_amount.collect();
        RefundMetricsBucketValue {
            successful_refunds,
            total_refunds,
            refund_success_rate,
            refund_count: self.refund_count.collect(),
            refund_success_count: self.refund_success.collect(),
            refund_processed_amount,
            refund_processed_amount_in_usd,
            refund_processed_count,
            refund_reason_distribution: self.refund_reason_distribution.collect(),
            refund_error_message_distribution: self.refund_error_message_distribution.collect(),
            refund_reason_count: self.refund_reason.collect(),
            refund_error_message_count: self.refund_error_message.collect(),
        }
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/accumulator.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 9, "num_tables": null, "score": null, "total_crates": null }
file_analytics_4123936003701731207
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/refund_processed_amount.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::RefundMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Metric: total refund amount processed (successful refunds only),
/// summed per dimension bucket and per currency.
#[derive(Default)]
pub(super) struct RefundProcessedAmount {}

#[async_trait::async_trait]
impl<T> super::RefundMetric<T> for RefundProcessedAmount
where
    T: AnalyticsDataSource + super::RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes a SUM(refund_amount) query grouped by the given
    /// dimensions (plus currency), restricted to successful refunds, and maps
    /// each row to its bucket identifier.
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>>
    where
        T: AnalyticsDataSource + super::RefundMetricAnalytics,
    {
        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Refund);

        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }

        query_builder
            .add_select_column(Aggregate::Sum {
                field: "refund_amount",
                alias: Some("total"),
            })
            .switch()?;
        // Currency is always selected/grouped so amounts are never summed
        // across currencies.
        query_builder.add_select_column("currency").switch()?;
        // Min/Max of `created_at` delimit the actual extent of each bucket.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;

        filters.set_filter_clause(&mut query_builder).switch()?;

        auth.set_filter_clause(&mut query_builder).switch()?;

        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;

        for dim in dimensions.iter() {
            query_builder.add_group_by_clause(dim).switch()?;
        }

        query_builder.add_group_by_clause("currency").switch()?;

        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .switch()?;
        }

        // Only successful refunds contribute to the processed amount.
        query_builder
            .add_filter_clause(
                RefundDimensions::RefundStatus,
                storage_enums::RefundStatus::Success,
            )
            .switch()?;

        query_builder
            .execute_query::<RefundMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    RefundMetricsBucketIdentifier::new(
                        i.currency.as_ref().map(|i| i.0),
                        // Status is intentionally None: rows were pre-filtered
                        // to Success above.
                        None,
                        i.connector.clone(),
                        i.refund_type.as_ref().map(|i| i.0.to_string()),
                        i.profile_id.clone(),
                        i.refund_reason.clone(),
                        i.refund_error_message.clone(),
                        TimeRange {
                            // Clip bucket bounds to the granularity grid when
                            // one was requested; otherwise use the query range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/refund_processed_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_8859029703702640559
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/refund_success_rate.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct RefundSuccessRate {} #[async_trait::async_trait] impl<T> super::RefundMetric<T> for RefundSuccessRate where T: AnalyticsDataSource + super::RefundMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::Refund); let mut dimensions = dimensions.to_vec(); dimensions.push(RefundDimensions::RefundStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut 
query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/refund_success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_756757890580920773
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/refund_success_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(super) struct RefundSuccessCount {} #[async_trait::async_trait] impl<T> super::RefundMetric<T> for RefundSuccessCount where T: AnalyticsDataSource + super::RefundMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::Refund); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range.set_filter_clause(&mut 
query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Success, ) .switch()?; query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/refund_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_5476406672706200391
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/refund_count.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::RefundMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Metric: total number of refunds (any status) per dimension bucket.
#[derive(Default)]
pub(super) struct RefundCount {}

#[async_trait::async_trait]
impl<T> super::RefundMetric<T> for RefundCount
where
    T: AnalyticsDataSource + super::RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes a COUNT(*) query grouped by the requested
    /// dimensions and maps each row to its bucket identifier.
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> {
        let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Refund);

        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }

        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        // Min/Max of `created_at` delimit the actual extent of each bucket.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;

        filters.set_filter_clause(&mut query_builder).switch()?;

        auth.set_filter_clause(&mut query_builder).switch()?;

        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;

        for dim in dimensions.iter() {
            query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }

        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }

        query_builder
            .execute_query::<RefundMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    RefundMetricsBucketIdentifier::new(
                        i.currency.as_ref().map(|i| i.0),
                        // Unlike the success-rate metric, refund_count keeps
                        // the status in the bucket id (no status pre-filter).
                        i.refund_status.as_ref().map(|i| i.0.to_string()),
                        i.connector.clone(),
                        i.refund_type.as_ref().map(|i| i.0.to_string()),
                        i.profile_id.clone(),
                        i.refund_reason.clone(),
                        i.refund_error_message.clone(),
                        TimeRange {
                            // Clip bucket bounds to the granularity grid when
                            // one was requested; otherwise use the query range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/refund_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_6942469131129321634
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_processed_amount.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::RefundMetricRow;
use crate::{
    enums::AuthInfo,
    query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Sessionized variant of the processed-amount metric: same query shape as
/// the non-sessionized version but over `RefundSessionized`, and it also
/// selects a row count alongside the amount sum.
#[derive(Default)]
pub(crate) struct RefundProcessedAmount {}

#[async_trait::async_trait]
impl<T> super::RefundMetric<T> for RefundProcessedAmount
where
    T: AnalyticsDataSource + super::RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds and executes a COUNT(*) + SUM(refund_amount) query over the
    /// sessionized refund table, restricted to successful refunds, grouped
    /// by the given dimensions plus currency.
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>>
    where
        T: AnalyticsDataSource + super::RefundMetricAnalytics,
    {
        let mut query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::RefundSessionized);

        for dim in dimensions.iter() {
            query_builder.add_select_column(dim).switch()?;
        }

        query_builder
            .add_select_column(Aggregate::Count {
                field: None,
                alias: Some("count"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Sum {
                field: "refund_amount",
                alias: Some("total"),
            })
            .switch()?;
        // Currency is always selected/grouped so amounts are never summed
        // across currencies.
        query_builder.add_select_column("currency").switch()?;
        // Min/Max of `created_at` delimit the actual extent of each bucket.
        query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;

        filters.set_filter_clause(&mut query_builder).switch()?;

        auth.set_filter_clause(&mut query_builder).switch()?;

        time_range
            .set_filter_clause(&mut query_builder)
            .attach_printable("Error filtering time range")
            .switch()?;

        for dim in dimensions.iter() {
            query_builder.add_group_by_clause(dim).switch()?;
        }

        query_builder.add_group_by_clause("currency").switch()?;

        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut query_builder)
                .switch()?;
        }

        // Only successful refunds contribute to the processed amount.
        query_builder
            .add_filter_clause(
                RefundDimensions::RefundStatus,
                storage_enums::RefundStatus::Success,
            )
            .switch()?;

        query_builder
            .execute_query::<RefundMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    RefundMetricsBucketIdentifier::new(
                        i.currency.as_ref().map(|i| i.0),
                        // Status is None: rows were pre-filtered to Success.
                        None,
                        i.connector.clone(),
                        i.refund_type.as_ref().map(|i| i.0.to_string()),
                        i.profile_id.clone(),
                        i.refund_reason.clone(),
                        i.refund_error_message.clone(),
                        TimeRange {
                            // Clip bucket bounds to the granularity grid when
                            // one was requested; otherwise use the query range.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/sessionized_metrics/refund_processed_amount.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_838168142166592886
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_success_rate.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct RefundSuccessRate {} #[async_trait::async_trait] impl<T> super::RefundMetric<T> for RefundSuccessRate where T: AnalyticsDataSource + super::RefundMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::RefundSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(RefundDimensions::RefundStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; 
auth.set_filter_clause(&mut query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/sessionized_metrics/refund_success_rate.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_7284661120182555149
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_error_message.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::RefundMetricRow;
use crate::{
    enums::AuthInfo,
    query::{
        Aggregate, FilterTypes, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket,
        ToSql, Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Top refund error-message metric over the sessionized refund collection.
///
/// Counts failed refunds per error message (and any other requested
/// dimensions), alongside an overall total computed by an embedded subquery,
/// so each row carries both its own count and the grand total.
#[derive(Default)]
pub(crate) struct RefundErrorMessage;

#[async_trait::async_trait]
impl<T> super::RefundMetric<T> for RefundErrorMessage
where
    T: AnalyticsDataSource + super::RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds a two-part query: an inner `sum(sign_flag)` subquery producing
    /// the overall total of rows with a non-null error message in the time
    /// range, spliced as a scalar column into an outer grouped query that
    /// counts failed refunds per error message.
    ///
    /// NOTE(review): the inner "total" subquery is constrained only by the
    /// time range — not by `filters`, `auth`, or refund status — presumably
    /// intentional so the total spans all matching rows; verify against the
    /// consumer of `total`.
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> {
        // Inner query: overall total (sum of sign_flag) of rows that have an
        // error message, within the requested time range.
        let mut inner_query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::RefundSessionized);
        inner_query_builder
            .add_select_column("sum(sign_flag)")
            .switch()?;

        // Only rows where the error-message column is NOT NULL count toward
        // the total ("NULL" here is the comparand required by the filter API,
        // not a literal value match).
        inner_query_builder
            .add_custom_filter_clause(
                RefundDimensions::RefundErrorMessage,
                "NULL",
                FilterTypes::IsNotNull,
            )
            .switch()?;

        time_range
            .set_filter_clause(&mut inner_query_builder)
            .attach_printable("Error filtering time range for inner query")
            .switch()?;

        // Render the inner query to a SQL string so it can be embedded as a
        // scalar subselect in the outer query's column list.
        let inner_query_string = inner_query_builder
            .build_query()
            .attach_printable("Error building inner query")
            .change_context(MetricsError::QueryBuildingError)?;

        // Outer query: per-dimension counts plus the inner total.
        let mut outer_query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::RefundSessionized);

        for dim in dimensions.iter() {
            outer_query_builder.add_select_column(dim).switch()?;
        }
        outer_query_builder
            .add_select_column("sum(sign_flag) AS count")
            .switch()?;
        // Embed the inner query result as a constant "total" column on every row.
        outer_query_builder
            .add_select_column(format!("({inner_query_string}) AS total"))
            .switch()?;
        // Min/max created_at support clipping the bucket time range to the
        // granularity, mirroring the other refund metrics.
        outer_query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        outer_query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;

        filters
            .set_filter_clause(&mut outer_query_builder)
            .switch()?;
        auth.set_filter_clause(&mut outer_query_builder).switch()?;
        time_range
            .set_filter_clause(&mut outer_query_builder)
            .attach_printable("Error filtering time range for outer query")
            .switch()?;

        // Error messages are only meaningful for failed refunds, so restrict
        // the outer counts to Failure status with a non-null error message.
        outer_query_builder
            .add_filter_clause(
                RefundDimensions::RefundStatus,
                storage_enums::RefundStatus::Failure,
            )
            .switch()?;
        outer_query_builder
            .add_custom_filter_clause(
                RefundDimensions::RefundErrorMessage,
                "NULL",
                FilterTypes::IsNotNull,
            )
            .switch()?;

        for dim in dimensions.iter() {
            outer_query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }

        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut outer_query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }

        // Largest counts first ...
        outer_query_builder
            .add_order_by_clause("count", Order::Descending)
            .attach_printable("Error adding order by clause")
            .switch()?;

        // ... then a stable ascending order on every dimension except the
        // error message itself (which is what's being ranked).
        let filtered_dimensions: Vec<&RefundDimensions> = dimensions
            .iter()
            .filter(|&&dim| dim != RefundDimensions::RefundErrorMessage)
            .collect();

        for dim in &filtered_dimensions {
            outer_query_builder
                .add_order_by_clause(*dim, Order::Ascending)
                .attach_printable("Error adding order by clause")
                .switch()?;
        }

        outer_query_builder
            .execute_query::<RefundMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    RefundMetricsBucketIdentifier::new(
                        i.currency.as_ref().map(|i| i.0),
                        None,
                        i.connector.clone(),
                        i.refund_type.as_ref().map(|i| i.0.to_string()),
                        i.profile_id.clone(),
                        i.refund_reason.clone(),
                        i.refund_error_message.clone(),
                        TimeRange {
                            // Clip bucket boundaries to the granularity when
                            // one is supplied; otherwise use the requested
                            // time range verbatim.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/sessionized_metrics/refund_error_message.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-2074473525729783853
clm
file
// Repository: hyperswitch // Crate: analytics // File: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_success_count.rs // Contains: 1 structs, 0 enums use std::collections::HashSet; use api_models::analytics::{ refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier}, Granularity, TimeRange, }; use common_utils::errors::ReportSwitchExt; use diesel_models::enums as storage_enums; use error_stack::ResultExt; use time::PrimitiveDateTime; use super::RefundMetricRow; use crate::{ enums::AuthInfo, query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window}, types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult}, }; #[derive(Default)] pub(crate) struct RefundSuccessCount {} #[async_trait::async_trait] impl<T> super::RefundMetric<T> for RefundSuccessCount where T: AnalyticsDataSource + super::RefundMetricAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; 
time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Success, ) .switch()?; query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) } }
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/sessionized_metrics/refund_success_count.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }
file_analytics_-9136454082866842947
clm
file
// Repository: hyperswitch
// Crate: analytics
// File: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_reason.rs
// Contains: 1 structs, 0 enums

use std::collections::HashSet;

use api_models::analytics::{
    refunds::{RefundDimensions, RefundFilters, RefundMetricsBucketIdentifier},
    Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;

use super::RefundMetricRow;
use crate::{
    enums::AuthInfo,
    query::{
        Aggregate, FilterTypes, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket,
        ToSql, Window,
    },
    types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};

/// Top refund-reason metric over the sessionized refund collection.
///
/// Counts refunds per refund reason (and any other requested dimensions),
/// alongside an overall total computed by an embedded subquery, so each row
/// carries both its own count and the grand total.
#[derive(Default)]
pub(crate) struct RefundReason;

#[async_trait::async_trait]
impl<T> super::RefundMetric<T> for RefundReason
where
    T: AnalyticsDataSource + super::RefundMetricAnalytics,
    PrimitiveDateTime: ToSql<T>,
    AnalyticsCollection: ToSql<T>,
    Granularity: GroupByClause<T>,
    Aggregate<&'static str>: ToSql<T>,
    Window<&'static str>: ToSql<T>,
{
    /// Builds a two-part query: an inner `sum(sign_flag)` subquery producing
    /// the overall total of rows with a non-null refund reason in the time
    /// range, spliced as a scalar column into an outer grouped query that
    /// counts refunds per reason.
    ///
    /// NOTE(review): unlike the error-message metric, no refund-status
    /// filter is applied here — reasons can be present on any status;
    /// the inner "total" subquery is also constrained only by time range,
    /// not by `filters` or `auth` — verify against the consumer of `total`.
    async fn load_metrics(
        &self,
        dimensions: &[RefundDimensions],
        auth: &AuthInfo,
        filters: &RefundFilters,
        granularity: Option<Granularity>,
        time_range: &TimeRange,
        pool: &T,
    ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> {
        // Inner query: overall total (sum of sign_flag) of rows that carry a
        // refund reason, within the requested time range.
        let mut inner_query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::RefundSessionized);
        inner_query_builder
            .add_select_column("sum(sign_flag)")
            .switch()?;

        // Only rows with a NOT NULL refund reason count toward the total
        // ("NULL" is the comparand required by the filter API, not a literal
        // value match).
        inner_query_builder
            .add_custom_filter_clause(
                RefundDimensions::RefundReason,
                "NULL",
                FilterTypes::IsNotNull,
            )
            .switch()?;

        time_range
            .set_filter_clause(&mut inner_query_builder)
            .attach_printable("Error filtering time range for inner query")
            .switch()?;

        // Render the inner query to a SQL string so it can be embedded as a
        // scalar subselect in the outer query's column list.
        let inner_query_string = inner_query_builder
            .build_query()
            .attach_printable("Error building inner query")
            .change_context(MetricsError::QueryBuildingError)?;

        // Outer query: per-dimension counts plus the inner total.
        let mut outer_query_builder: QueryBuilder<T> =
            QueryBuilder::new(AnalyticsCollection::RefundSessionized);

        for dim in dimensions.iter() {
            outer_query_builder.add_select_column(dim).switch()?;
        }
        outer_query_builder
            .add_select_column("sum(sign_flag) AS count")
            .switch()?;
        // Embed the inner query result as a constant "total" column on every row.
        outer_query_builder
            .add_select_column(format!("({inner_query_string}) AS total"))
            .switch()?;
        // Min/max created_at support clipping the bucket time range to the
        // granularity, mirroring the other refund metrics.
        outer_query_builder
            .add_select_column(Aggregate::Min {
                field: "created_at",
                alias: Some("start_bucket"),
            })
            .switch()?;
        outer_query_builder
            .add_select_column(Aggregate::Max {
                field: "created_at",
                alias: Some("end_bucket"),
            })
            .switch()?;

        filters
            .set_filter_clause(&mut outer_query_builder)
            .switch()?;
        auth.set_filter_clause(&mut outer_query_builder).switch()?;
        time_range
            .set_filter_clause(&mut outer_query_builder)
            .attach_printable("Error filtering time range for outer query")
            .switch()?;

        // The outer counts also only consider rows with a refund reason.
        outer_query_builder
            .add_custom_filter_clause(
                RefundDimensions::RefundReason,
                "NULL",
                FilterTypes::IsNotNull,
            )
            .switch()?;

        for dim in dimensions.iter() {
            outer_query_builder
                .add_group_by_clause(dim)
                .attach_printable("Error grouping by dimensions")
                .switch()?;
        }

        if let Some(granularity) = granularity {
            granularity
                .set_group_by_clause(&mut outer_query_builder)
                .attach_printable("Error adding granularity")
                .switch()?;
        }

        // Largest counts first ...
        outer_query_builder
            .add_order_by_clause("count", Order::Descending)
            .attach_printable("Error adding order by clause")
            .switch()?;

        // ... then a stable ascending order on every dimension except the
        // refund reason itself (which is what's being ranked).
        let filtered_dimensions: Vec<&RefundDimensions> = dimensions
            .iter()
            .filter(|&&dim| dim != RefundDimensions::RefundReason)
            .collect();

        for dim in &filtered_dimensions {
            outer_query_builder
                .add_order_by_clause(*dim, Order::Ascending)
                .attach_printable("Error adding order by clause")
                .switch()?;
        }

        outer_query_builder
            .execute_query::<RefundMetricRow, _>(pool)
            .await
            .change_context(MetricsError::QueryBuildingError)?
            .change_context(MetricsError::QueryExecutionFailure)?
            .into_iter()
            .map(|i| {
                Ok((
                    RefundMetricsBucketIdentifier::new(
                        i.currency.as_ref().map(|i| i.0),
                        None,
                        i.connector.clone(),
                        i.refund_type.as_ref().map(|i| i.0.to_string()),
                        i.profile_id.clone(),
                        i.refund_reason.clone(),
                        i.refund_error_message.clone(),
                        TimeRange {
                            // Clip bucket boundaries to the granularity when
                            // one is supplied; otherwise use the requested
                            // time range verbatim.
                            start_time: match (granularity, i.start_bucket) {
                                (Some(g), Some(st)) => g.clip_to_start(st)?,
                                _ => time_range.start_time,
                            },
                            end_time: granularity.as_ref().map_or_else(
                                || Ok(time_range.end_time),
                                |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
                            )?,
                        },
                    ),
                    i,
                ))
            })
            .collect::<error_stack::Result<
                HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>,
                crate::query::PostProcessingError,
            >>()
            .change_context(MetricsError::PostProcessingFailure)
    }
}
{ "crate": "analytics", "file": "crates/analytics/src/refunds/metrics/sessionized_metrics/refund_reason.rs", "file_size": null, "is_async": null, "is_pub": null, "num_enums": 0, "num_structs": 1, "num_tables": null, "score": null, "total_crates": null }