id | type | granularity | content | metadata |
|---|---|---|---|---|
fn_clm_analytics_load_metrics_-787494008911061593 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/success_rate
// Implementation of PaymentSuccessRate for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
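The PaymentSuccessRate query above deliberately pushes PaymentStatus onto the dimension list and only returns raw counts per bucket; the rate itself is derived later when the rows are accumulated. A minimal, self-contained sketch of that derivation, assuming a simplified status enum and that pending attempts are excluded from the denominator (both assumptions, not hyperswitch's accumulator code):

```rust
// Illustrative only: fold status-keyed counts into a success-rate percentage.
#[derive(Clone, Copy)]
enum AttemptStatus {
    Charged,
    Failure,
    Pending,
}

fn success_rate(rows: &[(AttemptStatus, u64)]) -> Option<f64> {
    // Count only terminal attempts; pending ones are left out of the ratio here.
    let (success, total) = rows.iter().fold((0u64, 0u64), |(s, t), (status, count)| match status {
        AttemptStatus::Charged => (s + count, t + count),
        AttemptStatus::Failure => (s, t + count),
        AttemptStatus::Pending => (s, t),
    });
    (total > 0).then(|| success as f64 * 100.0 / total as f64)
}

fn main() {
    let rows = vec![
        (AttemptStatus::Charged, 90),
        (AttemptStatus::Failure, 10),
        (AttemptStatus::Pending, 5),
    ];
    // 90 successes out of 100 terminal attempts -> Some(90.0)
    println!("{:?}", success_rate(&rows));
}
```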
fn_clm_analytics_load_metrics_-2747596269966648245 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/avg_ticket_size
// Implementation of AvgTicketSize for PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 400,
"total_crates": null
} |
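AvgTicketSize above selects SUM("amount") aliased as "total" and a row count aliased as "count" for charged attempts only; the average is presumably computed from those two aggregates afterwards. A hedged, standalone sketch of that division, guarding the empty-bucket case:

```rust
// Hypothetical post-processing sketch: derive the average ticket size from the
// "total" and "count" aggregates the query returns per bucket.
fn avg_ticket_size(total_minor_units: u64, count: u64) -> Option<f64> {
    // Guard against empty buckets to avoid a divide-by-zero.
    (count > 0).then(|| total_minor_units as f64 / count as f64)
}

fn main() {
    // 3 charged payments totalling 4_500 minor units -> 1_500.0 per payment.
    assert_eq!(avg_ticket_size(4_500, 3), Some(1_500.0));
    assert_eq!(avg_ticket_size(0, 0), None);
}
```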
fn_clm_analytics_load_metrics_-6383437784035400586 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/debit_routing
// Implementation of DebitRouting for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "debit_routing_savings",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 406,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-1307891052522529898 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/payment_count
// Implementation of PaymentCount for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
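Every metric in this family post-processes each row's start_bucket/end_bucket through Granularity::clip_to_start and clip_to_end before building the bucket's TimeRange. The exact semantics live in hyperswitch's query module; the sketch below only illustrates the presumed idea (rounding a timestamp to the edges of its bucket) using plain epoch seconds and a hypothetical hourly granularity:

```rust
// Illustrative stand-in for hyperswitch's Granularity, which operates on
// PrimitiveDateTime and supports several granularities; this version uses
// epoch seconds and a fixed bucket width.
#[derive(Clone, Copy)]
struct Granularity {
    bucket_seconds: u64,
}

impl Granularity {
    // Round a timestamp down to the start of its bucket.
    fn clip_to_start(&self, ts: u64) -> u64 {
        ts - ts % self.bucket_seconds
    }
    // Round a timestamp up to the last second of its bucket.
    fn clip_to_end(&self, ts: u64) -> u64 {
        self.clip_to_start(ts) + self.bucket_seconds - 1
    }
}

fn main() {
    let hourly = Granularity { bucket_seconds: 3_600 };
    let ts = 1_700_000_123; // some instant inside an hour
    assert_eq!(hourly.clip_to_start(ts), 1_699_999_200);
    assert_eq!(hourly.clip_to_end(ts), 1_700_002_799);
}
```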
fn_clm_analytics_load_metrics_-1258273766136528293 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/payment_success_count
// Implementation of PaymentSuccessCount for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
fn_clm_analytics_load_metrics_6581294628115519038 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/payment_processed_amount
// Implementation of PaymentProcessedAmount for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder.add_select_column("first_attempt").switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 420,
"total_crates": null
} |
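PaymentProcessedAmount above additionally groups by first_attempt and currency, which is what lets downstream accumulators report processed amounts both with and without smart retries. A hypothetical in-memory sketch of that split (the struct and field names are illustrative, not hyperswitch types):

```rust
use std::collections::HashMap;

#[derive(Default, Debug, PartialEq)]
struct ProcessedAmount {
    with_retries: u64,    // every charged attempt
    without_retries: u64, // only attempts where first_attempt == true
}

// Rows are (currency, first_attempt, amount) as the grouped query might return them.
fn accumulate(rows: &[(&str, bool, u64)]) -> HashMap<String, ProcessedAmount> {
    let mut out: HashMap<String, ProcessedAmount> = HashMap::new();
    for &(currency, first_attempt, amount) in rows {
        let entry = out.entry(currency.to_string()).or_default();
        entry.with_retries += amount;
        if first_attempt {
            entry.without_retries += amount;
        }
    }
    out
}

fn main() {
    let rows = [("USD", true, 1_000), ("USD", false, 250), ("EUR", true, 700)];
    let totals = accumulate(&rows);
    assert_eq!(
        totals["USD"],
        ProcessedAmount { with_retries: 1_250, without_retries: 1_000 }
    );
}
```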
fn_clm_analytics_load_distribution_-7414402142648116902 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/distribution/payment_error_message
// Implementation of PaymentErrorMessage for PaymentDistribution<T>
async fn load_distribution(
&self,
distribution: &PaymentDistributionBody,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(&distribution.distribution_for)
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause(&distribution.distribution_for)
.attach_printable("Error grouping by distribution_for")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Failure,
)
.switch()?;
for dim in dimensions.iter() {
query_builder.add_outer_select_column(dim).switch()?;
}
query_builder
.add_outer_select_column(&distribution.distribution_for)
.switch()?;
query_builder.add_outer_select_column("count").switch()?;
query_builder
.add_outer_select_column("start_bucket")
.switch()?;
query_builder
.add_outer_select_column("end_bucket")
.switch()?;
let sql_dimensions = query_builder.transform_to_sql_values(dimensions).switch()?;
query_builder
.add_outer_select_column(Window::Sum {
field: "count",
partition_by: Some(sql_dimensions),
order_by: None,
alias: Some("total"),
})
.switch()?;
query_builder
.add_top_n_clause(
dimensions,
distribution.distribution_cardinality.into(),
"count",
Order::Descending,
)
.switch()?;
query_builder
.execute_query::<PaymentDistributionRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 193,
"total_crates": null
} |
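The distribution query above combines a window SUM(count) OVER (PARTITION BY the selected dimensions), aliased as "total", with a top-N clause ordered by count descending. The sketch below reproduces the same shape in memory for a single connector dimension, purely as an illustration of what the SQL computes; the row layout and N are assumptions:

```rust
use std::collections::HashMap;

// For each connector: the partition-wide failure total plus the N most
// frequent error messages, mirroring the window SUM + top-N clause.
fn top_n_errors(
    rows: &[(&str, &str, u64)], // (connector, error_message, count)
    n: usize,
) -> HashMap<String, (u64, Vec<(String, u64)>)> {
    let mut grouped: HashMap<String, Vec<(String, u64)>> = HashMap::new();
    for &(connector, message, count) in rows {
        grouped
            .entry(connector.to_string())
            .or_default()
            .push((message.to_string(), count));
    }
    grouped
        .into_iter()
        .map(|(connector, mut messages)| {
            // The window SUM: total failures for the whole partition.
            let total: u64 = messages.iter().map(|(_, c)| c).sum();
            // The top-N clause: keep only the N most frequent messages.
            messages.sort_by(|a, b| b.1.cmp(&a.1));
            messages.truncate(n);
            (connector, (total, messages))
        })
        .collect()
}

fn main() {
    let rows = [
        ("stripe", "card_declined", 40),
        ("stripe", "insufficient_funds", 25),
        ("stripe", "expired_card", 5),
        ("adyen", "card_declined", 10),
    ];
    let result = top_n_errors(&rows, 2);
    assert_eq!(result["stripe"].0, 70);
    assert_eq!(result["stripe"].1.len(), 2);
}
```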
fn_clm_analytics_routing_events_core_-5571308143404746375 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/routing_events/core
pub async fn routing_events_core(
pool: &AnalyticsProvider,
req: RoutingEventsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<RoutingEventsResult>> {
let data = match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"Routing Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for Routing Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_routing_events(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 21,
"total_crates": null
} |
fn_clm_analytics_get_routing_events_4739387515297257324 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/routing_events/events
pub async fn get_routing_events<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: RoutingEventsRequest,
pool: &T,
) -> FiltersResult<Vec<RoutingEventsResult>>
where
T: AnalyticsDataSource + RoutingEventLogAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RoutingEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder
.add_filter_clause("payment_id", &query_param.payment_id)
.switch()?;
if let Some(refund_id) = query_param.refund_id {
query_builder
.add_filter_clause("refund_id", &refund_id)
.switch()?;
}
if let Some(dispute_id) = query_param.dispute_id {
query_builder
.add_filter_clause("dispute_id", &dispute_id)
.switch()?;
}
query_builder
.execute_query::<RoutingEventsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 39,
"total_crates": null
} |
fn_clm_analytics_connector_events_core_6468591324287838683 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/connector_events/core
pub async fn connector_events_core(
pool: &AnalyticsProvider,
req: ConnectorEventsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<ConnectorEventsResult>> {
let data = match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"Connector Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for Connector Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_connector_events(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 21,
"total_crates": null
} |
fn_clm_analytics_get_connector_events_-42736513926132580 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/connector_events/events
pub async fn get_connector_events<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: ConnectorEventsRequest,
pool: &T,
) -> FiltersResult<Vec<ConnectorEventsResult>>
where
T: AnalyticsDataSource + ConnectorEventLogAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::ConnectorEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder
.add_filter_clause("payment_id", &query_param.payment_id)
.switch()?;
if let Some(refund_id) = query_param.refund_id {
query_builder
.add_filter_clause("refund_id", &refund_id)
.switch()?;
}
if let Some(dispute_id) = query_param.dispute_id {
query_builder
.add_filter_clause("dispute_id", &dispute_id)
.switch()?;
}
//TODO!: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<ConnectorEventsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 39,
"total_crates": null
} |
fn_clm_analytics_get_filters_-2942824424997980640 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetApiEventFiltersRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<ApiEventFiltersResponse> {
use api_models::analytics::{api_event::ApiEventDimensions, ApiEventFilterValue};
use super::filters::get_api_event_filter_for_dimension;
use crate::api_event::filters::ApiEventFilter;
let mut res = ApiEventFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented(
"API Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for API Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_api_event_filter_for_dimension(dim, merchant_id, &req.time_range, ckh_pool)
.await
}
}
.switch()?
.into_iter()
.filter_map(|fil: ApiEventFilter| match dim {
ApiEventDimensions::StatusCode => fil.status_code.map(|i| i.to_string()),
ApiEventDimensions::FlowType => fil.flow_type,
ApiEventDimensions::ApiFlow => fil.api_flow,
})
.collect::<Vec<String>>();
res.query_data.push(ApiEventFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 78,
"total_crates": null
} |
fn_clm_analytics_get_api_event_metrics_-2942824424997980640 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/core
pub async fn get_api_event_metrics(
pool: &AnalyticsProvider,
merchant_id: &common_utils::id_type::MerchantId,
req: GetApiEventMetricRequest,
) -> AnalyticsResult<MetricsResponse<ApiMetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<ApiEventMetricsBucketIdentifier, ApiEventMetricRow> =
HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_api_metrics_query",
api_event_metric = metric_type.as_ref()
);
// TODO: lifetime issues with joinset,
// can be optimized away if joinset lifetime requirements are relaxed
let merchant_id_scoped = merchant_id.to_owned();
set.spawn(
async move {
let data = pool
.get_api_event_metrics(
&metric_type,
&req.group_by_names.clone(),
&merchant_id_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
}
.instrument(task_span),
);
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
metrics_accumulator
.entry(id)
.and_modify(|data| {
data.api_count = data.api_count.or(value.api_count);
data.status_code_count = data.status_code_count.or(value.status_code_count);
data.latency = data.latency.or(value.latency);
})
.or_insert(value);
}
}
let query_data: Vec<ApiMetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| ApiMetricsBucketResponse {
values: ApiEventMetricsBucketValue {
latency: val.latency,
api_count: val.api_count,
status_code_count: val.status_code_count,
},
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 72,
"total_crates": null
} |
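get_api_event_metrics above fans out one query per requested metric with tokio::task::JoinSet and merges the rows into an accumulator keyed by bucket identifier. A compact, runnable sketch of that fan-out/fan-in pattern, assuming tokio with the rt and macros features and placeholder metric and bucket types:

```rust
use std::collections::HashMap;
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let metrics = vec!["api_count", "status_code_count", "latency"];
    let mut set = JoinSet::new();

    for metric in metrics {
        set.spawn(async move {
            // Stand-in for the per-metric query: pretend each metric returns
            // one (bucket_id, value) row.
            (metric, vec![("bucket-0".to_string(), 1u64)])
        });
    }

    let mut accumulator: HashMap<String, u64> = HashMap::new();
    // join_next yields tasks in completion order until the set is drained.
    while let Some(joined) = set.join_next().await {
        let (metric, rows) = joined.expect("task panicked");
        for (bucket, value) in rows {
            *accumulator.entry(bucket).or_default() += value;
        }
        println!("merged rows for {metric}");
    }
    println!("{accumulator:?}");
}
```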
fn_clm_analytics_api_events_core_-2942824424997980640 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/core
pub async fn api_events_core(
pool: &AnalyticsProvider,
req: ApiLogsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<ApiLogsResult>> {
let data = match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"API Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for API Events"),
AnalyticsProvider::Clickhouse(pool) => get_api_event(merchant_id, req, pool).await,
AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
| AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
get_api_event(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 23,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_-4922300030624596243 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/types
// Implementation of ApiEventFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.status_code.is_empty() {
builder
.add_filter_in_range_clause(ApiEventDimensions::StatusCode, &self.status_code)
.attach_printable("Error adding status_code filter")?;
}
if !self.flow_type.is_empty() {
builder
.add_filter_in_range_clause(ApiEventDimensions::FlowType, &self.flow_type)
.attach_printable("Error adding flow_type filter")?;
}
if !self.api_flow.is_empty() {
builder
.add_filter_in_range_clause(ApiEventDimensions::ApiFlow, &self.api_flow)
.attach_printable("Error adding api_name filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 716,
"total_crates": null
} |
fn_clm_analytics_get_api_event_-9014046169691575055 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/events
pub async fn get_api_event<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: ApiLogsRequest,
pool: &T,
) -> FiltersResult<Vec<ApiLogsResult>>
where
T: AnalyticsDataSource + ApiLogsFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
match query_param.query_param {
QueryType::Payment { payment_id } => {
query_builder
.add_filter_clause("payment_id", &payment_id)
.switch()?;
query_builder
.add_filter_in_range_clause(
"api_flow",
&[
Flow::PaymentsCancel,
Flow::PaymentsCapture,
Flow::PaymentsConfirm,
Flow::PaymentsCreate,
Flow::PaymentsStart,
Flow::PaymentsUpdate,
Flow::RefundsCreate,
Flow::RefundsUpdate,
Flow::DisputesEvidenceSubmit,
Flow::AttachDisputeEvidence,
Flow::RetrieveDisputeEvidence,
Flow::IncomingWebhookReceive,
],
)
.switch()?;
}
QueryType::Refund {
payment_id,
refund_id,
} => {
query_builder
.add_filter_clause("payment_id", &payment_id)
.switch()?;
query_builder
.add_filter_clause("refund_id", refund_id)
.switch()?;
query_builder
.add_filter_in_range_clause("api_flow", &[Flow::RefundsCreate, Flow::RefundsUpdate])
.switch()?;
}
QueryType::Dispute {
payment_id,
dispute_id,
} => {
query_builder
.add_filter_clause("payment_id", &payment_id)
.switch()?;
query_builder
.add_filter_clause("dispute_id", dispute_id)
.switch()?;
query_builder
.add_filter_in_range_clause(
"api_flow",
&[
Flow::DisputesEvidenceSubmit,
Flow::AttachDisputeEvidence,
Flow::RetrieveDisputeEvidence,
],
)
.switch()?;
}
}
//TODO!: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<ApiLogsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 62,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-5455013079523400807 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/metrics
// Implementation of ApiEventMetrics for ApiEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
match self {
Self::Latency => {
MaxLatency
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::ApiCount => {
ApiCount
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::StatusCodeCount => {
StatusCodeCount
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 284,
"total_crates": null
} |
fn_clm_analytics_get_api_event_filter_for_dimension_-2391317180182972608 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/filters
pub async fn get_api_event_filter_for_dimension<T>(
dimension: ApiEventDimensions,
merchant_id: &common_utils::id_type::MerchantId,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<ApiEventFilter>>
where
T: AnalyticsDataSource + ApiEventFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<ApiEventFilter, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 35,
"total_crates": null
} |
fn_clm_analytics_load_metrics_5179948144914373771 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/metrics/latency
// Implementation of MaxLatency for super::ApiEventMetric<T>
async fn load_metrics(
&self,
_dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder
.add_select_column(Aggregate::Sum {
field: "latency",
alias: Some("latency_sum"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: Some("latency"),
alias: Some("latency_count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_custom_filter_clause("request", "10.63.134.6", FilterTypes::NotLike)
.attach_printable("Error filtering out locker IP")
.switch()?;
query_builder
.execute_query::<LatencyAvg, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
ApiEventMetricsBucketIdentifier::new(TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
}),
ApiEventMetricRow {
latency: if i.latency_count != 0 {
Some(i.latency_sum.unwrap_or(0) / i.latency_count)
} else {
None
},
api_count: None,
status_code_count: None,
start_bucket: i.start_bucket,
end_bucket: i.end_bucket,
},
))
})
.collect::<error_stack::Result<
HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 348,
"total_crates": null
} |
fn_clm_analytics_load_metrics_2071837773289595877 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/metrics/status_code_count
// Implementation of StatusCodeCount for super::ApiEventMetric<T>
async fn load_metrics(
&self,
_dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder
.add_select_column(Aggregate::Count {
field: Some("status_code"),
alias: Some("status_code_count"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<ApiEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
ApiEventMetricsBucketIdentifier::new(TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
}),
i,
))
})
.collect::<error_stack::Result<
HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 336,
"total_crates": null
} |
fn_clm_analytics_load_metrics_7518088828445753363 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/api_event/metrics/api_count
// Implementation of ApiCount for super::ApiEventMetric<T>
async fn load_metrics(
&self,
_dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("api_count"),
})
.switch()?;
if !filters.flow_type.is_empty() {
query_builder
.add_filter_in_range_clause(ApiEventDimensions::FlowType, &filters.flow_type)
.attach_printable("Error adding flow_type filter")
.switch()?;
}
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.execute_query::<ApiEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
ApiEventMetricsBucketIdentifier::new(TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
}),
i,
))
})
.collect::<error_stack::Result<
HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 340,
"total_crates": null
} |
fn_clm_analytics_get_metrics_3264536209302971765 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
ex_rates: &Option<ExchangeRates>,
auth: &AuthInfo,
req: GetPaymentIntentMetricRequest,
) -> AnalyticsResult<PaymentIntentsMetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
PaymentIntentMetricsBucketIdentifier,
PaymentIntentMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_payment_intents_metrics_query",
payment_metric = metric_type.as_ref()
);
// TODO: lifetime issues with joinset,
// can be optimized away if joinset lifetime requirements are relaxed
let auth_scoped = auth.to_owned();
set.spawn(
async move {
let data = pool
.get_payment_intent_metrics(
&metric_type,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
TaskType::MetricTask(metric_type, data)
}
.instrument(task_span),
);
}
while let Some(task_type) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
match task_type {
TaskType::MetricTask(metric, data) => {
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
PaymentIntentMetrics::SuccessfulSmartRetries
| PaymentIntentMetrics::SessionizedSuccessfulSmartRetries => {
metrics_builder
.successful_smart_retries
.add_metrics_bucket(&value)
}
PaymentIntentMetrics::TotalSmartRetries
| PaymentIntentMetrics::SessionizedTotalSmartRetries => metrics_builder
.total_smart_retries
.add_metrics_bucket(&value),
PaymentIntentMetrics::SmartRetriedAmount
| PaymentIntentMetrics::SessionizedSmartRetriedAmount => metrics_builder
.smart_retried_amount
.add_metrics_bucket(&value),
PaymentIntentMetrics::PaymentIntentCount
| PaymentIntentMetrics::SessionizedPaymentIntentCount => metrics_builder
.payment_intent_count
.add_metrics_bucket(&value),
PaymentIntentMetrics::PaymentsSuccessRate
| PaymentIntentMetrics::SessionizedPaymentsSuccessRate => metrics_builder
.payments_success_rate
.add_metrics_bucket(&value),
PaymentIntentMetrics::SessionizedPaymentProcessedAmount
| PaymentIntentMetrics::PaymentProcessedAmount => metrics_builder
.payment_processed_amount
.add_metrics_bucket(&value),
PaymentIntentMetrics::SessionizedPaymentsDistribution => metrics_builder
.payments_distribution
.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
}
}
let mut success = 0;
let mut success_without_smart_retries = 0;
let mut total_smart_retried_amount = 0;
let mut total_smart_retried_amount_in_usd = 0;
let mut total_smart_retried_amount_without_smart_retries = 0;
let mut total_smart_retried_amount_without_smart_retries_in_usd = 0;
let mut total = 0;
let mut total_payment_processed_amount = 0;
let mut total_payment_processed_amount_in_usd = 0;
let mut total_payment_processed_count = 0;
let mut total_payment_processed_amount_without_smart_retries = 0;
let mut total_payment_processed_amount_without_smart_retries_in_usd = 0;
let mut total_payment_processed_count_without_smart_retries = 0;
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let mut collected_values = val.collect();
if let Some(success_count) = collected_values.successful_payments {
success += success_count;
}
if let Some(success_count) = collected_values.successful_payments_without_smart_retries
{
success_without_smart_retries += success_count;
}
if let Some(total_count) = collected_values.total_payments {
total += total_count;
}
if let Some(retried_amount) = collected_values.smart_retried_amount {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(retried_amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.smart_retried_amount_in_usd = amount_in_usd;
total_smart_retried_amount += retried_amount;
total_smart_retried_amount_in_usd += amount_in_usd.unwrap_or(0);
}
if let Some(retried_amount) =
collected_values.smart_retried_amount_without_smart_retries
{
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(retried_amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.smart_retried_amount_without_smart_retries_in_usd = amount_in_usd;
total_smart_retried_amount_without_smart_retries += retried_amount;
total_smart_retried_amount_without_smart_retries_in_usd +=
amount_in_usd.unwrap_or(0);
}
if let Some(amount) = collected_values.payment_processed_amount {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.payment_processed_amount_in_usd = amount_in_usd;
total_payment_processed_amount_in_usd += amount_in_usd.unwrap_or(0);
total_payment_processed_amount += amount;
}
if let Some(count) = collected_values.payment_processed_count {
total_payment_processed_count += count;
}
if let Some(amount) = collected_values.payment_processed_amount_without_smart_retries {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.payment_processed_amount_without_smart_retries_in_usd =
amount_in_usd;
total_payment_processed_amount_without_smart_retries_in_usd +=
amount_in_usd.unwrap_or(0);
total_payment_processed_amount_without_smart_retries += amount;
}
if let Some(count) = collected_values.payment_processed_count_without_smart_retries {
total_payment_processed_count_without_smart_retries += count;
}
MetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
let total_success_rate = match (success, total) {
(s, t) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
let total_success_rate_without_smart_retries = match (success_without_smart_retries, total) {
(s, t) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
Ok(PaymentIntentsMetricsResponse {
query_data,
meta_data: [PaymentIntentsAnalyticsMetadata {
total_success_rate,
total_success_rate_without_smart_retries,
total_smart_retried_amount: Some(total_smart_retried_amount),
total_smart_retried_amount_without_smart_retries: Some(
total_smart_retried_amount_without_smart_retries,
),
total_payment_processed_amount: Some(total_payment_processed_amount),
total_payment_processed_amount_without_smart_retries: Some(
total_payment_processed_amount_without_smart_retries,
),
total_smart_retried_amount_in_usd: if ex_rates.is_some() {
Some(total_smart_retried_amount_in_usd)
} else {
None
},
total_smart_retried_amount_without_smart_retries_in_usd: if ex_rates.is_some() {
Some(total_smart_retried_amount_without_smart_retries_in_usd)
} else {
None
},
total_payment_processed_amount_in_usd: if ex_rates.is_some() {
Some(total_payment_processed_amount_in_usd)
} else {
None
},
total_payment_processed_amount_without_smart_retries_in_usd: if ex_rates.is_some() {
Some(total_payment_processed_amount_without_smart_retries_in_usd)
} else {
None
},
total_payment_processed_count: Some(total_payment_processed_count),
total_payment_processed_count_without_smart_retries: Some(
total_payment_processed_count_without_smart_retries,
),
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 250,
"total_crates": null
} |
fn_clm_analytics_get_filters_3264536209302971765 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetPaymentIntentFiltersRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<PaymentIntentFiltersResponse> {
let mut res = PaymentIntentFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_payment_intent_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_payment_intent_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
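            // The combined providers query both backends, log any mismatch between the two
            // result sets, and return the result of the primary backend (ClickHouse for
            // `CombinedCkh`, Postgres for `CombinedSqlx`).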
            AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
                    sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payment intents analytics filters")
},
_ => {}
};
ckh_result
}
            AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
                    sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payment intents analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: PaymentIntentFilterRow| match dim {
PaymentIntentDimensions::PaymentIntentStatus => fil.status.map(|i| i.as_ref().to_string()),
PaymentIntentDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
PaymentIntentDimensions::ProfileId => fil.profile_id,
PaymentIntentDimensions::Connector => fil.connector,
PaymentIntentDimensions::AuthType => fil.authentication_type.map(|i| i.as_ref().to_string()),
PaymentIntentDimensions::PaymentMethod => fil.payment_method,
PaymentIntentDimensions::PaymentMethodType => fil.payment_method_type,
PaymentIntentDimensions::CardNetwork => fil.card_network,
PaymentIntentDimensions::MerchantId => fil.merchant_id,
PaymentIntentDimensions::CardLast4 => fil.card_last_4,
PaymentIntentDimensions::CardIssuer => fil.card_issuer,
PaymentIntentDimensions::ErrorReason => fil.error_reason,
})
.collect::<Vec<String>>();
res.query_data.push(PaymentIntentFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 98,
"total_crates": null
} |
fn_clm_analytics_get_sankey_3264536209302971765 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/core
pub async fn get_sankey(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: TimeRange,
) -> AnalyticsResult<Vec<SankeyRow>> {
match pool {
AnalyticsProvider::Sqlx(_) => Err(AnalyticsError::NotImplemented(
"Sankey not implemented for sqlx",
))?,
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool) => {
let sankey_rows = get_sankey_data(ckh_pool, auth, &req)
.await
.change_context(AnalyticsError::UnknownError)?;
Ok(sankey_rows)
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 34,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_5744622211962734547 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/types
// Implementation of PaymentIntentFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
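        // Each non-empty filter list is translated into an `IN (...)` clause on the
        // corresponding dimension; empty lists add no constraint to the query.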
if !self.status.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::PaymentIntentStatus,
&self.status,
)
.attach_printable("Error adding payment intent status filter")?;
}
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::Currency, &self.currency)
.attach_printable("Error adding currency filter")?;
}
if !self.profile_id.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::ProfileId, &self.profile_id)
.attach_printable("Error adding profile id filter")?;
}
if !self.connector.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::Connector, &self.connector)
.attach_printable("Error adding connector filter")?;
}
if !self.auth_type.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::AuthType, &self.auth_type)
.attach_printable("Error adding auth type filter")?;
}
if !self.payment_method.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::PaymentMethod,
&self.payment_method,
)
.attach_printable("Error adding payment method filter")?;
}
if !self.payment_method_type.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::PaymentMethodType,
&self.payment_method_type,
)
.attach_printable("Error adding payment method type filter")?;
}
if !self.card_network.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::CardNetwork,
&self.card_network,
)
.attach_printable("Error adding card network filter")?;
}
if !self.merchant_id.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::MerchantId, &self.merchant_id)
.attach_printable("Error adding merchant id filter")?;
}
if !self.card_last_4.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::CardLast4, &self.card_last_4)
.attach_printable("Error adding card last 4 filter")?;
}
if !self.card_issuer.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::CardIssuer, &self.card_issuer)
.attach_printable("Error adding card issuer filter")?;
}
if !self.error_reason.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::ErrorReason,
&self.error_reason,
)
.attach_printable("Error adding error reason filter")?;
}
if !self.customer_id.is_empty() {
builder
.add_filter_in_range_clause("customer_id", &self.customer_id)
.attach_printable("Error adding customer id filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 776,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-8377707349214184515 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics
// Implementation of PaymentIntentMetrics for PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
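        // Dispatch to the concrete metric implementation; the `Sessionized*` variants run
        // the same metrics against the sessionized payment intent collection.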
match self {
Self::SuccessfulSmartRetries => {
SuccessfulSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::TotalSmartRetries => {
TotalSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SmartRetriedAmount => {
SmartRetriedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentIntentCount => {
PaymentIntentCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentsSuccessRate => {
PaymentsSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentProcessedAmount => {
PaymentProcessedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedSuccessfulSmartRetries => {
sessionized_metrics::SuccessfulSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedTotalSmartRetries => {
sessionized_metrics::TotalSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedSmartRetriedAmount => {
sessionized_metrics::SmartRetriedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentIntentCount => {
sessionized_metrics::PaymentIntentCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentsSuccessRate => {
sessionized_metrics::PaymentsSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentProcessedAmount => {
sessionized_metrics::PaymentProcessedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentsDistribution => {
sessionized_metrics::PaymentsDistribution
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 304,
"total_crates": null
} |
fn_clm_analytics_try_into_3801484380078630222 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/sankey
// Implementation of serde_json::Value for TryInto<SankeyRow>
fn try_into(self) -> Result<SankeyRow, Self::Error> {
logger::debug!("Parsing SankeyRow from {:?}", self);
serde_json::from_value(self).change_context(ParsingError::StructParseFailure(
"Failed to parse Sankey in clickhouse results",
))
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 173,
"total_crates": null
} |
fn_clm_analytics_get_sankey_data_3801484380078630222 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/sankey
pub async fn get_sankey_data(
clickhouse_client: &ClickhouseClient,
auth: &AuthInfo,
time_range: &TimeRange,
) -> MetricsResult<Vec<SankeyRow>> {
let mut query_builder =
QueryBuilder::<ClickhouseClient>::new(AnalyticsCollection::PaymentIntentSessionized);
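    // Count sessionized payment intents grouped by intent status, refund status, dispute
    // status, and whether the intent completed on its first attempt; these grouped counts
    // are returned as `SankeyRow`s.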
query_builder
.add_select_column(Aggregate::<String>::Count {
field: None,
alias: Some("count"),
})
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("status")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("refunds_status")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("dispute_status")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
auth.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
time_range
.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("status")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("refunds_status")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("dispute_status")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.execute_query::<SankeyRow, _>(clickhouse_client)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(Ok)
.collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 88,
"total_crates": null
} |
fn_clm_analytics_get_payment_intent_filter_for_dimension_-7005640911850098378 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/filters
pub async fn get_payment_intent_filter_for_dimension<T>(
dimension: PaymentIntentDimensions,
merchant_id: &common_utils::id_type::MerchantId,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<PaymentIntentFilterRow>>
where
T: AnalyticsDataSource + PaymentIntentFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntent);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder.set_distinct();
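    // The distinct values of the selected dimension, scoped to the merchant and time
    // range, become the available filter options for that dimension.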
query_builder
.execute_query::<PaymentIntentFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 50,
"total_crates": null
} |
fn_clm_analytics_collect_-2287335991659071139 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/accumulator
// Inherent implementation for PaymentIntentMetricsAccumulator
pub fn collect(self) -> PaymentIntentMetricsBucketValue {
let (
successful_payments,
successful_payments_without_smart_retries,
total_payments,
payments_success_rate,
payments_success_rate_without_smart_retries,
) = self.payments_success_rate.collect();
let (
smart_retried_amount,
smart_retried_amount_without_smart_retries,
smart_retried_amount_in_usd,
smart_retried_amount_without_smart_retries_in_usd,
) = self.smart_retried_amount.collect();
let (
payment_processed_amount,
payment_processed_count,
payment_processed_amount_without_smart_retries,
payment_processed_count_without_smart_retries,
payment_processed_amount_in_usd,
payment_processed_amount_without_smart_retries_in_usd,
) = self.payment_processed_amount.collect();
let (
payments_success_rate_distribution_without_smart_retries,
payments_failure_rate_distribution_without_smart_retries,
) = self.payments_distribution.collect();
PaymentIntentMetricsBucketValue {
successful_smart_retries: self.successful_smart_retries.collect(),
total_smart_retries: self.total_smart_retries.collect(),
smart_retried_amount,
smart_retried_amount_in_usd,
smart_retried_amount_without_smart_retries,
smart_retried_amount_without_smart_retries_in_usd,
payment_intent_count: self.payment_intent_count.collect(),
successful_payments,
successful_payments_without_smart_retries,
total_payments,
payments_success_rate,
payments_success_rate_without_smart_retries,
payment_processed_amount,
payment_processed_count,
payment_processed_amount_without_smart_retries,
payment_processed_count_without_smart_retries,
payments_success_rate_distribution_without_smart_retries,
payments_failure_rate_distribution_without_smart_retries,
payment_processed_amount_in_usd,
payment_processed_amount_without_smart_retries_in_usd,
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1445,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_-2287335991659071139 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/accumulator
// Implementation of PaymentsDistributionAccumulator for PaymentIntentMetricAccumulator
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) {
let first_attempt = metrics.first_attempt.unwrap_or(0);
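        // `first_attempt` is 1 when the row covers intents that finished on their first
        // attempt and 0 when they were retried. Successes count as "without retries" only
        // for first-attempt rows; failures count for retried rows and for first-attempt
        // rows whose status is `Failed`. Intents still awaiting customer, merchant, or
        // confirmation action are excluded from the total.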
if let Some(ref status) = metrics.status {
if status.as_ref() == &storage_enums::IntentStatus::Succeeded {
if let Some(success) = metrics
.count
.and_then(|success| u32::try_from(success).ok())
{
if first_attempt == 1 {
self.success_without_retries += success;
}
}
}
if let Some(failed) = metrics.count.and_then(|failed| u32::try_from(failed).ok()) {
if first_attempt == 0
|| (first_attempt == 1
&& status.as_ref() == &storage_enums::IntentStatus::Failed)
{
self.failed_without_retries += failed;
}
}
if status.as_ref() != &storage_enums::IntentStatus::RequiresCustomerAction
&& status.as_ref() != &storage_enums::IntentStatus::RequiresPaymentMethod
&& status.as_ref() != &storage_enums::IntentStatus::RequiresMerchantAction
&& status.as_ref() != &storage_enums::IntentStatus::RequiresConfirmation
{
if let Some(total) = metrics.count.and_then(|total| u32::try_from(total).ok()) {
self.total += total;
}
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 190,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-198350560630243172 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/payments_success_rate
// Implementation of PaymentsSuccessRate for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
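        // Success rate needs per-status counts, so the intent status is always added as an
        // extra group-by dimension on top of the requested ones.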
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 380,
"total_crates": null
} |
fn_clm_analytics_load_metrics_5080860985878246681 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/total_smart_retries
// Implementation of TotalSmartRetries for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
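        // `attempt_count > 1` selects intents that were retried at least once, i.e. the
        // smart-retry population.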
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 384,
"total_crates": null
} |
fn_clm_analytics_load_metrics_3805283965162844435 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/payment_intent_count
// Implementation of PaymentIntentCount for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 380,
"total_crates": null
} |
fn_clm_analytics_load_metrics_2024946312112321367 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/successful_smart_retries
// Implementation of SuccessfulSmartRetries for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
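        // A successful smart retry is an intent that needed more than one attempt and
        // still ended up in the `Succeeded` status.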
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 388,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-5366521891238787064 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/smart_retried_amount
// Implementation of SmartRetriedAmount for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
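        // Only retried intents that ultimately succeeded contribute to the smart-retried
        // amount, summed per currency.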
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 398,
"total_crates": null
} |
fn_clm_analytics_load_metrics_8742916750832744795 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/payment_processed_amount
// Implementation of PaymentProcessedAmount for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentIntentDimensions::PaymentIntentStatus,
storage_enums::IntentStatus::Succeeded,
)
.switch()?;
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 398,
"total_crates": null
} |
fn_clm_analytics_load_metrics_1926287053724202473 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_success_rate
// Implementation of PaymentsSuccessRate for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt".to_string())
.switch()?;
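        // `(attempt_count = 1) as first_attempt` lets downstream accumulators split the
        // counts into with- and without-smart-retry views.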
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-5498099359869658060 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/total_smart_retries
// Implementation of TotalSmartRetries for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 384,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-734897447310608646 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_distribution
// Implementation of PaymentsDistribution for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 390,
"total_crates": null
} |
fn_clm_analytics_load_metrics_1247684622629325753 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_intent_count
// Implementation of PaymentIntentCount for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 380,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-2734564410566090211 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/successful_smart_retries
// Implementation of SuccessfulSmartRetries for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 388,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-5396728363407325163 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/smart_retried_amount
// Implementation of SmartRetriedAmount for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 408,
"total_crates": null
} |
fn_clm_analytics_load_metrics_198853910783601371 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_processed_amount
// Implementation of PaymentProcessedAmount for super::PaymentIntentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
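        // Only successfully completed intents contribute to the processed amount.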
query_builder
.add_filter_clause(
PaymentIntentDimensions::PaymentIntentStatus,
storage_enums::IntentStatus::Succeeded,
)
.switch()?;
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 408,
"total_crates": null
} |
fn_clm_analytics_get_metrics_-4652895912443208711 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/active_payments/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
publishable_key: &String,
merchant_id: &common_utils::id_type::MerchantId,
req: GetActivePaymentsMetricRequest,
) -> AnalyticsResult<MetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricsAccumulator,
> = HashMap::new();
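    // Fan out one async task per requested metric; results are joined below and merged into
    // the per-bucket accumulators.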
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let publishable_key_scoped = publishable_key.to_owned();
let merchant_id_scoped = merchant_id.to_owned();
let pool = pool.clone();
set.spawn(async move {
let data = pool
.get_active_payments_metrics(
&metric_type,
&merchant_id_scoped,
&publishable_key_scoped,
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
});
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
logger::info!("Logging metric: {metric} Result: {:?}", data);
for (id, value) in data? {
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
ActivePaymentsMetrics::ActivePayments => {
metrics_builder.active_payments.add_metrics_bucket(&value)
}
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| MetricsBucketResponse {
values: val.collect(),
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 104,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-2581705518405657616 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/active_payments/metrics
// Implementation of ActivePaymentsMetrics for ActivePaymentsMetric<T>
async fn load_metrics(
&self,
merchant_id: &common_utils::id_type::MerchantId,
publishable_key: &str,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
> {
match self {
Self::ActivePayments => {
ActivePayments
.load_metrics(merchant_id, publishable_key, time_range, pool)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 280,
"total_crates": null
} |
fn_clm_analytics_collect_4233306731385869935 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/active_payments/accumulator
// Inherent implementation for ActivePaymentsMetricsAccumulator
pub fn collect(self) -> ActivePaymentsMetricsBucketValue {
ActivePaymentsMetricsBucketValue {
active_payments: self.active_payments.collect(),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1433,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_4233306731385869935 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/active_payments/accumulator
// Implementation of CountAccumulator for ActivePaymentsMetricAccumulator
fn add_metrics_bucket(&mut self, metrics: &ActivePaymentsMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 158,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-5973133036627559915 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/active_payments/metrics/active_payments
// Implementation of ActivePayments for super::ActivePaymentsMetric<T>
async fn load_metrics(
&self,
merchant_id: &common_utils::id_type::MerchantId,
publishable_key: &str,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::ActivePaymentsAnalytics);
query_builder
.add_select_column(Aggregate::DistinctCount {
field: "payment_id",
alias: Some("count"),
})
.switch()?;
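        // Match rows whose merchant_id column holds either the merchant id or the publishable key
        // (active-payment events are presumably logged under either identifier).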
query_builder
.add_custom_filter_clause(
"merchant_id",
format!("'{}','{}'", merchant_id.get_string_repr(), publishable_key),
FilterTypes::In,
)
.switch()?;
query_builder
.add_negative_filter_clause("payment_id", "")
.switch()?;
query_builder
.add_custom_filter_clause(
"flow_type",
"'sdk', 'payment', 'payment_redirection_response'",
FilterTypes::In,
)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.execute_query::<ActivePaymentsMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| Ok((ActivePaymentsMetricsBucketIdentifier::new(None), i)))
.collect::<error_stack::Result<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 314,
"total_crates": null
} |
fn_clm_analytics_get_metrics_1449960899046810626 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
publishable_key: &String,
req: GetSdkEventMetricRequest,
) -> AnalyticsResult<MetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
SdkEventMetricsBucketIdentifier,
SdkEventMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let publishable_key_scoped = publishable_key.to_owned();
let pool = pool.clone();
set.spawn(async move {
let data = pool
.get_sdk_event_metrics(
&metric_type,
&req.group_by_names.clone(),
&publishable_key_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
});
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
logger::info!("Logging Result {:?}", data);
for (id, value) in data? {
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
SdkEventMetrics::PaymentAttempts => {
metrics_builder.payment_attempts.add_metrics_bucket(&value)
}
SdkEventMetrics::PaymentMethodsCallCount => metrics_builder
.payment_methods_call_count
.add_metrics_bucket(&value),
SdkEventMetrics::SdkRenderedCount => metrics_builder
.sdk_rendered_count
.add_metrics_bucket(&value),
SdkEventMetrics::SdkInitiatedCount => metrics_builder
.sdk_initiated_count
.add_metrics_bucket(&value),
SdkEventMetrics::PaymentMethodSelectedCount => metrics_builder
.payment_method_selected_count
.add_metrics_bucket(&value),
SdkEventMetrics::PaymentDataFilledCount => metrics_builder
.payment_data_filled_count
.add_metrics_bucket(&value),
SdkEventMetrics::AveragePaymentTime => metrics_builder
.average_payment_time
.add_metrics_bucket(&value),
SdkEventMetrics::LoadTime => metrics_builder.load_time.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| MetricsBucketResponse {
values: val.collect(),
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 122,
"total_crates": null
} |
fn_clm_analytics_get_filters_1449960899046810626 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetSdkEventFiltersRequest,
publishable_key: &String,
) -> AnalyticsResult<SdkEventFiltersResponse> {
use api_models::analytics::{sdk_events::SdkEventDimensions, SdkEventFilterValue};
use super::filters::get_sdk_event_filter_for_dimension;
use crate::sdk_events::filters::SdkEventFilter;
let mut res = SdkEventFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented(
"SDK Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for SDK Events"),
AnalyticsProvider::Clickhouse(pool) => {
get_sdk_event_filter_for_dimension(dim, publishable_key, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
| AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
get_sdk_event_filter_for_dimension(dim, publishable_key, &req.time_range, ckh_pool)
.await
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: SdkEventFilter| match dim {
SdkEventDimensions::PaymentMethod => fil.payment_method,
SdkEventDimensions::Platform => fil.platform,
SdkEventDimensions::BrowserName => fil.browser_name,
SdkEventDimensions::Source => fil.source,
SdkEventDimensions::Component => fil.component,
SdkEventDimensions::PaymentExperience => fil.payment_experience,
})
.collect::<Vec<String>>();
res.query_data.push(SdkEventFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 76,
"total_crates": null
} |
fn_clm_analytics_sdk_events_core_1449960899046810626 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/core
pub async fn sdk_events_core(
pool: &AnalyticsProvider,
req: SdkEventsRequest,
publishable_key: &String,
) -> AnalyticsResult<Vec<SdkEventsResult>> {
match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"SDK Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for Sdk Events"),
AnalyticsProvider::Clickhouse(pool) => get_sdk_event(publishable_key, req, pool).await,
AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
| AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
get_sdk_event(publishable_key, req, ckh_pool).await
}
}
.switch()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 23,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_-1945885039183258293 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/types
// Implementation of SdkEventFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.payment_method.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::PaymentMethod, &self.payment_method)
.attach_printable("Error adding payment method filter")?;
}
if !self.platform.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::Platform, &self.platform)
.attach_printable("Error adding platform filter")?;
}
if !self.browser_name.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::BrowserName, &self.browser_name)
.attach_printable("Error adding browser name filter")?;
}
if !self.source.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::Source, &self.source)
.attach_printable("Error adding source filter")?;
}
if !self.component.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::Component, &self.component)
.attach_printable("Error adding component filter")?;
}
if !self.payment_experience.is_empty() {
builder
.add_filter_in_range_clause(
SdkEventDimensions::PaymentExperience,
&self.payment_experience,
)
.attach_printable("Error adding payment experience filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 734,
"total_crates": null
} |
fn_clm_analytics_get_sdk_event_-3345483030359656899 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/events
pub async fn get_sdk_event<T>(
publishable_key: &String,
request: SdkEventsRequest,
pool: &T,
) -> FiltersResult<Vec<SdkEventsResult>>
where
T: AnalyticsDataSource + SdkEventsFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
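    // Build a quoted, comma-separated list of the known SDK event names for the IN filter below.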
let static_event_list = SdkEventNames::iter()
.map(|i| format!("'{}'", i.as_ref()))
.collect::<Vec<String>>()
.join(",");
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_filter_clause("payment_id", &request.payment_id)
.switch()?;
query_builder
.add_custom_filter_clause("event_name", static_event_list, FilterTypes::In)
.switch()?;
let _ = &request
.time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
//TODO!: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<SdkEventsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 50,
"total_crates": null
} |
fn_clm_analytics_load_metrics_401944254737775452 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics
// Implementation of SdkEventMetrics for SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
match self {
Self::PaymentAttempts => {
PaymentAttempts
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::PaymentMethodsCallCount => {
PaymentMethodsCallCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::SdkRenderedCount => {
SdkRenderedCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::SdkInitiatedCount => {
SdkInitiatedCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::PaymentMethodSelectedCount => {
PaymentMethodSelectedCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::PaymentDataFilledCount => {
PaymentDataFilledCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::AveragePaymentTime => {
AveragePaymentTime
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::LoadTime => {
LoadTime
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 294,
"total_crates": null
} |
fn_clm_analytics_get_sdk_event_filter_for_dimension_6112143976662803486 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/filters
pub async fn get_sdk_event_filter_for_dimension<T>(
dimension: SdkEventDimensions,
publishable_key: &String,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<SdkEventFilter>>
where
T: AnalyticsDataSource + SdkEventFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<SdkEventFilter, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 38,
"total_crates": null
} |
fn_clm_analytics_collect_864927405442929712 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/accumulator
// Inherent implementation for SdkEventMetricsAccumulator
pub fn collect(self) -> SdkEventMetricsBucketValue {
SdkEventMetricsBucketValue {
payment_attempts: self.payment_attempts.collect(),
payment_methods_call_count: self.payment_methods_call_count.collect(),
average_payment_time: self.average_payment_time.collect(),
load_time: self.load_time.collect(),
sdk_initiated_count: self.sdk_initiated_count.collect(),
sdk_rendered_count: self.sdk_rendered_count.collect(),
payment_method_selected_count: self.payment_method_selected_count.collect(),
payment_data_filled_count: self.payment_data_filled_count.collect(),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1447,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_864927405442929712 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/accumulator
// Implementation of AverageAccumulator for SdkEventMetricAccumulator
fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) {
let total = metrics
.total
.as_ref()
.and_then(bigdecimal::ToPrimitive::to_u32);
        let count = metrics.count.and_then(|count| u32::try_from(count).ok());
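        // Only buckets carrying both a total and a count are accumulated; incomplete buckets are
        // dropped (and logged) so they do not skew the running average.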
match (total, count) {
(Some(total), Some(count)) => {
self.total += total;
self.count += count;
}
_ => {
logger::error!(message="Dropping metrics for average accumulator", metric=?metrics);
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 168,
"total_crates": null
} |
fn_clm_analytics_load_metrics_3092374752411580706 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/payment_method_selected_count
// Implementation of PaymentMethodSelectedCount for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
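        // When a granularity is requested, the builder adds a time_bucket column (in minutes),
        // which is also added to the GROUP BY further below.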
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentMethodChanged)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-7131975147738423566 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/payment_data_filled_count
// Implementation of PaymentDataFilledCount for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentDataFilled)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-4540861527781399224 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/load_time
// Implementation of LoadTime for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
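        // Median (p50) latency; it is aliased as "count", presumably so it lands in the shared
        // SdkEventMetricRow `count` field on deserialization.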
query_builder
.add_select_column(Aggregate::Percentile {
field: "latency",
alias: Some("count"),
percentile: Some(&50),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::AppRendered)
.switch()?;
query_builder
.add_custom_filter_clause("latency", 0, FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 364,
"total_crates": null
} |
fn_clm_analytics_load_metrics_7948149243041950119 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/sdk_rendered_count
// Implementation of SdkRenderedCount for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::AppRendered)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-8047295984645824119 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/payment_methods_call_count
// Implementation of PaymentMethodsCallCount for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentMethodsCall)
.switch()?;
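        // Restrict to PaymentMethodsCall events logged with log_type INFO under the API category.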
query_builder
.add_filter_clause("log_type", "INFO")
.switch()?;
query_builder
.add_filter_clause("category", "API")
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 368,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-1901645174441584379 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/average_payment_time
// Implementation of AveragePaymentTime for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Percentile {
field: "latency",
alias: Some("count"),
percentile: Some(&50),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentAttempt)
.switch()?;
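        // Ignore rows without a positive latency measurement.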
query_builder
.add_custom_filter_clause("latency", 0, FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 364,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-733769413053774596 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/sdk_initiated_count
// Implementation of SdkInitiatedCount for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::OrcaElementsCalled)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_load_metrics_5547065084221992686 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sdk_events/metrics/payment_attempts
// Implementation of PaymentAttempts for super::SdkEventMetric<T>
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentAttempt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_get_metrics_2329259491707190799 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: GetDisputeMetricRequest,
) -> AnalyticsResult<DisputesMetricsResponse<DisputeMetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
DisputeMetricsBucketIdentifier,
DisputeMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_dispute_query",
            dispute_metric = metric_type.as_ref()
);
        // JoinSet currently only accepts 'static tasks, even though the spawned tasks here do not outlive the borrowed auth data.
        // This clone can be removed once scoped tasks are supported.
let auth_scoped = auth.to_owned();
set.spawn(
async move {
let data = pool
.get_dispute_metrics(
&metric_type,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
}
.instrument(task_span),
);
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
DisputeMetrics::DisputeStatusMetric
| DisputeMetrics::SessionizedDisputeStatusMetric => metrics_builder
.disputes_status_rate
.add_metrics_bucket(&value),
DisputeMetrics::TotalAmountDisputed
| DisputeMetrics::SessionizedTotalAmountDisputed => {
metrics_builder.disputed_amount.add_metrics_bucket(&value)
}
DisputeMetrics::TotalDisputeLostAmount
| DisputeMetrics::SessionizedTotalDisputeLostAmount => metrics_builder
.dispute_lost_amount
.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
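    // Roll up per-bucket disputed and lost amounts into response-level totals.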
let mut total_disputed_amount = 0;
let mut total_dispute_lost_amount = 0;
let query_data: Vec<DisputeMetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let collected_values = val.collect();
if let Some(amount) = collected_values.disputed_amount {
total_disputed_amount += amount;
}
if let Some(amount) = collected_values.dispute_lost_amount {
total_dispute_lost_amount += amount;
}
DisputeMetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
Ok(DisputesMetricsResponse {
query_data,
meta_data: [DisputesAnalyticsMetadata {
total_disputed_amount: Some(total_disputed_amount),
total_dispute_lost_amount: Some(total_dispute_lost_amount),
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 120,
"total_crates": null
} |
fn_clm_analytics_get_filters_2329259491707190799 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetDisputeFilterRequest,
auth: &AuthInfo,
) -> AnalyticsResult<DisputeFiltersResponse> {
let mut res = DisputeFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_dispute_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_dispute_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres disputes analytics filters")
},
_ => {}
};
ckh_result
}
AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres disputes analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: DisputeFilterRow| match dim {
DisputeDimensions::DisputeStage => fil.dispute_stage,
DisputeDimensions::Connector => fil.connector,
DisputeDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
})
.collect::<Vec<String>>();
res.query_data.push(DisputeFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 86,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_-4596811083653305252 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/types
// Implementation of DisputeFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.connector.is_empty() {
builder
.add_filter_in_range_clause(DisputeDimensions::Connector, &self.connector)
.attach_printable("Error adding connector filter")?;
}
if !self.dispute_stage.is_empty() {
builder
.add_filter_in_range_clause(DisputeDimensions::DisputeStage, &self.dispute_stage)
.attach_printable("Error adding dispute stage filter")?;
}
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(DisputeDimensions::Currency, &self.currency)
.attach_printable("Error adding currency filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 716,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-7536419697251891499 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics
// Implementation of DisputeMetrics for DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> {
match self {
Self::TotalAmountDisputed => {
TotalAmountDisputed::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::DisputeStatusMetric => {
DisputeStatusMetric::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::TotalDisputeLostAmount => {
TotalDisputeLostAmount::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedTotalAmountDisputed => {
sessionized_metrics::TotalAmountDisputed::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedDisputeStatusMetric => {
sessionized_metrics::DisputeStatusMetric::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedTotalDisputeLostAmount => {
sessionized_metrics::TotalDisputeLostAmount::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 302,
"total_crates": null
} |
fn_clm_analytics_get_dispute_filter_for_dimension_-5971840646135921840 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/filters
pub async fn get_dispute_filter_for_dimension<T>(
dimension: DisputeDimensions,
auth: &AuthInfo,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<DisputeFilterRow>>
where
T: AnalyticsDataSource + DisputeFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<DisputeFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 50,
"total_crates": null
} |
fn_clm_analytics_collect_476581593178719205 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/accumulators
// Inherent implementation for DisputeMetricsAccumulator
pub fn collect(self) -> DisputeMetricsBucketValue {
let (challenge_rate, won_rate, lost_rate, total_dispute) =
self.disputes_status_rate.collect().unwrap_or_default();
DisputeMetricsBucketValue {
disputes_challenged: challenge_rate,
disputes_won: won_rate,
disputes_lost: lost_rate,
disputed_amount: self.disputed_amount.collect(),
dispute_lost_amount: self.dispute_lost_amount.collect(),
total_dispute,
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1439,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_476581593178719205 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/accumulators
// Implementation of RateAccumulator for DisputeMetricAccumulator
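// Tally challenged / won / lost counts by dispute status; `total` counts every bucket regardless
// of status so the rates can be derived later in collect().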
fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow) {
if let Some(ref dispute_status) = metrics.dispute_status {
if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeChallenged {
self.challenged_count += metrics.count.unwrap_or_default();
}
if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeWon {
self.won_count += metrics.count.unwrap_or_default();
}
if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeLost {
self.lost_count += metrics.count.unwrap_or_default();
}
};
self.total += metrics.count.unwrap_or_default();
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 172,
"total_crates": null
} |
fn_clm_analytics_load_metrics_1822396913330005851 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics/total_dispute_lost_amount
// Implementation of TotalDisputeLostAmount for super::DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_lost")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
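For readers less familiar with the builder API, the calls in the record above roughly correspond to one grouped aggregation query. The constant below is an illustrative sketch of that SQL shape (ClickHouse-style dialect assumed, with the auth filter shown as a merchant_id restriction); the real statement is generated by hyperswitch's QueryBuilder, and its placeholders, quoting, and granularity bucket column may differ.

const ILLUSTRATIVE_DISPUTE_LOST_AMOUNT_QUERY: &str = r#"
SELECT
    dispute_stage,
    connector,
    currency,
    sum(dispute_amount) AS total,
    min(created_at)     AS start_bucket,
    max(created_at)     AS end_bucket
FROM dispute
WHERE created_at >= {start_time}
  AND created_at <= {end_time}
  AND merchant_id IN ({merchant_ids})
  AND dispute_status = 'dispute_lost'
GROUP BY dispute_stage, connector, currency
"#;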
fn_clm_analytics_load_metrics_-3081130948002875137 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics/dispute_status_metric
// Implementation of DisputeStatusMetric for super::DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder = QueryBuilder::new(AnalyticsCollection::Dispute);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder.add_select_column("dispute_status").switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions {
query_builder.add_group_by_clause(dim).switch()?;
}
query_builder
.add_group_by_clause("dispute_status")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 358,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-7511829536262985420 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics/total_amount_disputed
// Implementation of TotalAmountDisputed for super::DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_won")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 358,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-4492868007242351850 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics/sessionized_metrics/total_dispute_lost_amount
// Implementation of TotalDisputeLostAmount for super::DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::DisputeSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_lost")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_load_metrics_8016671553227630082 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics/sessionized_metrics/dispute_status_metric
// Implementation of DisputeStatusMetric for super::DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder = QueryBuilder::new(AnalyticsCollection::DisputeSessionized);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder.add_select_column("dispute_status").switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions {
query_builder.add_group_by_clause(dim).switch()?;
}
query_builder
.add_group_by_clause("dispute_status")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 358,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-4995527209823346083 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/disputes/metrics/sessionized_metrics/total_amount_disputed
// Implementation of TotalAmountDisputed for super::DisputeMetric<T>
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::DisputeSessionized);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_won")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 358,
"total_crates": null
} |
fn_clm_analytics_get_metrics_4358319716203446909 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
merchant_id: &common_utils::id_type::MerchantId,
req: GetFrmMetricRequest,
) -> AnalyticsResult<MetricsResponse<FrmMetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<FrmMetricsBucketIdentifier, FrmMetricsAccumulator> =
HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span =
tracing::debug_span!("analytics_frm_query", frm_metric = metric_type.as_ref());
// Currently JoinSet works with only static lifetime references even if the task pool does not outlive the given reference
// We can optimize away this clone once that is fixed
let merchant_id_scoped = merchant_id.to_owned();
set.spawn(
async move {
let data = pool
.get_frm_metrics(
&metric_type,
&req.group_by_names.clone(),
&merchant_id_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
}
.instrument(task_span),
);
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
FrmMetrics::FrmBlockedRate => {
metrics_builder.frm_blocked_rate.add_metrics_bucket(&value)
}
FrmMetrics::FrmTriggeredAttempts => metrics_builder
.frm_triggered_attempts
.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let query_data: Vec<FrmMetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| FrmMetricsBucketResponse {
values: val.collect(),
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 118,
"total_crates": null
} |
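The function above fans each requested metric out to its own tokio task and folds the results back into an accumulator map as tasks complete. A minimal sketch of that JoinSet pattern, with `fetch_metric` and plain string keys standing in for `pool.get_frm_metrics` and the metric enum:

use std::collections::HashMap;
use tokio::task::JoinSet;

async fn fetch_metric(name: String) -> (String, u64) {
    // Placeholder for a real query; returns the metric name with a dummy value.
    (name, 42)
}

async fn collect_metrics(names: Vec<String>) -> HashMap<String, u64> {
    let mut set = JoinSet::new();
    for name in names {
        set.spawn(fetch_metric(name));
    }
    let mut out = HashMap::new();
    // join_next yields results in completion order, not spawn order.
    while let Some(joined) = set.join_next().await {
        if let Ok((name, value)) = joined {
            out.insert(name, value);
        }
    }
    out
}

Spawned tasks must own 'static data, which is why the record above clones the request, pool handle, and merchant id before moving them into each task.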
fn_clm_analytics_get_filters_4358319716203446909 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetFrmFilterRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<FrmFiltersResponse> {
let mut res = FrmFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_frm_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_frm_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics filters")
},
_ => {}
};
ckh_result
}
AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: FrmFilterRow| match dim {
FrmDimensions::FrmStatus => fil.frm_status.map(|i| i.as_ref().to_string()),
FrmDimensions::FrmName => fil.frm_name,
FrmDimensions::FrmTransactionType => {
fil.frm_transaction_type.map(|i| i.as_ref().to_string())
}
})
.collect::<Vec<String>>();
res.query_data.push(FrmFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 92,
"total_crates": null
} |
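The CombinedCkh and CombinedSqlx arms above implement a dual-read: the same filter query runs against both ClickHouse and Postgres, a mismatch is logged, and one of the two results is kept. A generic sketch of that comparison, with `eprintln!` standing in for the structured logger::error! call and a plain String as a hypothetical error type:

async fn compare_and_pick<Row, FutA, FutB>(
    primary: FutA,
    secondary: FutB,
) -> Result<Vec<Row>, String>
where
    Row: PartialEq + std::fmt::Debug,
    FutA: std::future::Future<Output = Result<Vec<Row>, String>>,
    FutB: std::future::Future<Output = Result<Vec<Row>, String>>,
{
    let primary_res = primary.await;
    let secondary_res = secondary.await;
    if let (Ok(p), Ok(s)) = (&primary_res, &secondary_res) {
        if p != s {
            // hyperswitch attaches both result sets to a structured error log here.
            eprintln!("mismatch between primary and secondary analytics results: {p:?} vs {s:?}");
        }
    }
    // CombinedCkh returns the ClickHouse result, CombinedSqlx the Postgres one;
    // only which result flows downstream differs between the two variants.
    primary_res
}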
fn_clm_analytics_set_filter_clause_-1875974268312943121 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/types
// Implementation of FrmFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.frm_status.is_empty() {
builder
.add_filter_in_range_clause(FrmDimensions::FrmStatus, &self.frm_status)
.attach_printable("Error adding frm status filter")?;
}
if !self.frm_name.is_empty() {
builder
.add_filter_in_range_clause(FrmDimensions::FrmName, &self.frm_name)
.attach_printable("Error adding frm name filter")?;
}
if !self.frm_transaction_type.is_empty() {
builder
.add_filter_in_range_clause(
FrmDimensions::FrmTransactionType,
&self.frm_transaction_type,
)
.attach_printable("Error adding frm transaction type filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 716,
"total_crates": null
} |
fn_clm_analytics_load_metrics_7582556004316013526 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/metrics
// Implementation of FrmMetrics for FrmMetric<T>
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
match self {
Self::FrmTriggeredAttempts => {
FrmTriggeredAttempts::default()
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::FrmBlockedRate => {
FrmBlockedRate::default()
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 286,
"total_crates": null
} |
fn_clm_analytics_get_frm_filter_for_dimension_-7204242016518971210 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/filters
pub async fn get_frm_filter_for_dimension<T>(
dimension: FrmDimensions,
merchant_id: &common_utils::id_type::MerchantId,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<FrmFilterRow>>
where
T: AnalyticsDataSource + FrmFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<FrmFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 50,
"total_crates": null
} |
fn_clm_analytics_collect_7376462788155120898 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/accumulator
// Inherent implementation for FrmMetricsAccumulator
pub fn collect(self) -> FrmMetricsBucketValue {
FrmMetricsBucketValue {
frm_blocked_rate: self.frm_blocked_rate.collect(),
frm_triggered_attempts: self.frm_triggered_attempts.collect(),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1435,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_7376462788155120898 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/accumulator
// Implementation of BlockedRateAccumulator for FrmMetricAccumulator
fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) {
if let Some(ref frm_status) = metrics.frm_status {
if frm_status.as_ref() == &storage_enums::FraudCheckStatus::Fraud {
self.fraud += metrics.count.unwrap_or_default();
}
};
self.total += metrics.count.unwrap_or_default();
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 164,
"total_crates": null
} |
fn_clm_analytics_load_metrics_731015646083759299 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/metrics/frm_triggered_attempts
// Implementation of FrmTriggeredAttempts for super::FrmMetric<T>
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<FrmMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
FrmMetricsBucketIdentifier::new(
i.frm_name.as_ref().map(|i| i.to_string()),
i.frm_status.as_ref().map(|i| i.0.to_string()),
i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<Vec<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 368,
"total_crates": null
} |
fn_clm_analytics_load_metrics_1051160479070550136 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/frm/metrics/frm_blocked_rate
// Implementation of FrmBlockedRate for super::FrmMetric<T>
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>>
where
T: AnalyticsDataSource + super::FrmMetricAnalytics,
{
let mut query_builder = QueryBuilder::new(AnalyticsCollection::FraudCheck);
let mut dimensions = dimensions.to_vec();
dimensions.push(FrmDimensions::FrmStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.execute_query::<FrmMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
FrmMetricsBucketIdentifier::new(
i.frm_name.as_ref().map(|i| i.to_string()),
None,
i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 360,
"total_crates": null
} |
fn_clm_analytics_get_metrics_-5947427886667826822 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: GetAuthEventMetricRequest,
) -> AnalyticsResult<AuthEventMetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
AuthEventMetricsBucketIdentifier,
AuthEventMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let auth_scoped = auth.to_owned();
let pool = pool.clone();
set.spawn(async move {
let data = pool
.get_auth_event_metrics(
&metric_type,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
});
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
for (id, value) in data? {
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
AuthEventMetrics::AuthenticationCount => metrics_builder
.authentication_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationAttemptCount => metrics_builder
.authentication_attempt_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationSuccessCount => metrics_builder
.authentication_success_count
.add_metrics_bucket(&value),
AuthEventMetrics::ChallengeFlowCount => metrics_builder
.challenge_flow_count
.add_metrics_bucket(&value),
AuthEventMetrics::ChallengeAttemptCount => metrics_builder
.challenge_attempt_count
.add_metrics_bucket(&value),
AuthEventMetrics::ChallengeSuccessCount => metrics_builder
.challenge_success_count
.add_metrics_bucket(&value),
AuthEventMetrics::FrictionlessFlowCount => metrics_builder
.frictionless_flow_count
.add_metrics_bucket(&value),
AuthEventMetrics::FrictionlessSuccessCount => metrics_builder
.frictionless_success_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationErrorMessage => metrics_builder
.authentication_error_message
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationFunnel => metrics_builder
.authentication_funnel
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationExemptionApprovedCount => metrics_builder
.authentication_exemption_approved_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationExemptionRequestedCount => metrics_builder
.authentication_exemption_requested_count
.add_metrics_bucket(&value),
}
}
}
let mut total_error_message_count = 0;
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let collected_values = val.collect();
if let Some(count) = collected_values.error_message_count {
total_error_message_count += count;
}
MetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
Ok(AuthEventMetricsResponse {
query_data,
meta_data: [AuthEventsAnalyticsMetadata {
total_error_message_count: Some(total_error_message_count),
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 130,
"total_crates": null
} |
fn_clm_analytics_get_filters_-5947427886667826822 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetAuthEventFilterRequest,
auth: &AuthInfo,
) -> AnalyticsResult<AuthEventFiltersResponse> {
let mut res = AuthEventFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(_pool) => {
Err(report!(AnalyticsError::UnknownError))
}
AnalyticsProvider::Clickhouse(pool) => {
get_auth_events_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
.map_err(|e| e.change_context(AnalyticsError::UnknownError))
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) | AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_auth_events_filter_for_dimension(
dim,
auth,
&req.time_range,
ckh_pool,
)
.await
.map_err(|e| e.change_context(AnalyticsError::UnknownError));
let sqlx_result = get_auth_events_filter_for_dimension(
dim,
auth,
&req.time_range,
sqlx_pool,
)
.await
.map_err(|e| e.change_context(AnalyticsError::UnknownError));
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
                        router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres auth events analytics filters")
},
_ => {}
};
ckh_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: AuthEventFilterRow| match dim {
AuthEventDimensions::AuthenticationStatus => fil.authentication_status.map(|i| i.as_ref().to_string()),
AuthEventDimensions::TransactionStatus => fil.trans_status.map(|i| i.as_ref().to_string()),
AuthEventDimensions::AuthenticationType => fil.authentication_type.map(|i| i.as_ref().to_string()),
AuthEventDimensions::ErrorMessage => fil.error_message,
AuthEventDimensions::AuthenticationConnector => fil.authentication_connector.map(|i| i.as_ref().to_string()),
AuthEventDimensions::MessageVersion => fil.message_version,
AuthEventDimensions::AcsReferenceNumber => fil.acs_reference_number,
AuthEventDimensions::Platform => fil.platform,
AuthEventDimensions::Mcc => fil.mcc,
AuthEventDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
AuthEventDimensions::MerchantCountry => fil.merchant_country,
AuthEventDimensions::BillingCountry => fil.billing_country,
AuthEventDimensions::ShippingCountry => fil.shipping_country,
AuthEventDimensions::IssuerCountry => fil.issuer_country,
AuthEventDimensions::EarliestSupportedVersion => fil.earliest_supported_version,
AuthEventDimensions::LatestSupportedVersion => fil.latest_supported_version,
AuthEventDimensions::WhitelistDecision => fil.whitelist_decision.map(|i| i.to_string()),
AuthEventDimensions::DeviceManufacturer => fil.device_manufacturer,
AuthEventDimensions::DeviceType => fil.device_type,
AuthEventDimensions::DeviceBrand => fil.device_brand,
AuthEventDimensions::DeviceOs => fil.device_os,
AuthEventDimensions::DeviceDisplay => fil.device_display,
AuthEventDimensions::BrowserName => fil.browser_name,
AuthEventDimensions::BrowserVersion => fil.browser_version,
AuthEventDimensions::IssuerId => fil.issuer_id,
AuthEventDimensions::SchemeName => fil.scheme_name,
AuthEventDimensions::ExemptionRequested => fil.exemption_requested.map(|i| i.to_string()),
AuthEventDimensions::ExemptionAccepted => fil.exemption_accepted.map(|i| i.to_string()),
})
.collect::<Vec<String>>();
res.query_data.push(AuthEventFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 128,
"total_crates": null
} |
fn_clm_analytics_get_sankey_-5947427886667826822 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/core
pub async fn get_sankey(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: TimeRange,
) -> AnalyticsResult<Vec<SankeyRow>> {
match pool {
AnalyticsProvider::Sqlx(_) => Err(AnalyticsError::NotImplemented(
"Sankey not implemented for sqlx",
))?,
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool) => {
let sankey_rows = get_sankey_data(ckh_pool, auth, &req)
.await
.change_context(AnalyticsError::UnknownError)?;
Ok(sankey_rows)
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 34,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_-288882700675595536 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/types
// Implementation of AuthEventFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.authentication_status.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::AuthenticationStatus,
&self.authentication_status,
)
.attach_printable("Error adding authentication status filter")?;
}
if !self.trans_status.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::TransactionStatus,
&self.trans_status,
)
.attach_printable("Error adding transaction status filter")?;
}
if !self.error_message.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::ErrorMessage, &self.error_message)
.attach_printable("Error adding error message filter")?;
}
if !self.authentication_connector.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::AuthenticationConnector,
&self.authentication_connector,
)
.attach_printable("Error adding authentication connector filter")?;
}
if !self.message_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::MessageVersion,
&self.message_version,
)
.attach_printable("Error adding message version filter")?;
}
if !self.platform.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::Platform, &self.platform)
.attach_printable("Error adding platform filter")?;
}
if !self.acs_reference_number.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::AcsReferenceNumber,
&self.acs_reference_number,
)
.attach_printable("Error adding acs reference number filter")?;
}
if !self.mcc.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::Mcc, &self.mcc)
.attach_printable("Failed to add MCC filter")?;
}
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::Currency, &self.currency)
.attach_printable("Failed to add currency filter")?;
}
if !self.merchant_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::MerchantCountry,
&self.merchant_country,
)
.attach_printable("Failed to add merchant country filter")?;
}
if !self.billing_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::BillingCountry,
&self.billing_country,
)
.attach_printable("Failed to add billing country filter")?;
}
if !self.shipping_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::ShippingCountry,
&self.shipping_country,
)
.attach_printable("Failed to add shipping country filter")?;
}
if !self.issuer_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::IssuerCountry,
&self.issuer_country,
)
.attach_printable("Failed to add issuer country filter")?;
}
if !self.earliest_supported_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::EarliestSupportedVersion,
&self.earliest_supported_version,
)
.attach_printable("Failed to add earliest supported version filter")?;
}
if !self.latest_supported_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::LatestSupportedVersion,
&self.latest_supported_version,
)
.attach_printable("Failed to add latest supported version filter")?;
}
if !self.whitelist_decision.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::WhitelistDecision,
&self.whitelist_decision,
)
.attach_printable("Failed to add whitelist decision filter")?;
}
if !self.device_manufacturer.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::DeviceManufacturer,
&self.device_manufacturer,
)
.attach_printable("Failed to add device manufacturer filter")?;
}
if !self.device_type.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::DeviceType, &self.device_type)
.attach_printable("Failed to add device type filter")?;
}
if !self.device_brand.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::DeviceBrand, &self.device_brand)
.attach_printable("Failed to add device brand filter")?;
}
if !self.device_os.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::DeviceOs, &self.device_os)
.attach_printable("Failed to add device OS filter")?;
}
if !self.device_display.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::DeviceDisplay,
&self.device_display,
)
.attach_printable("Failed to add device display filter")?;
}
if !self.browser_name.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::BrowserName, &self.browser_name)
.attach_printable("Failed to add browser name filter")?;
}
if !self.browser_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::BrowserVersion,
&self.browser_version,
)
.attach_printable("Failed to add browser version filter")?;
}
if !self.issuer_id.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::IssuerId, &self.issuer_id)
.attach_printable("Failed to add issuer ID filter")?;
}
if !self.scheme_name.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::SchemeName, &self.scheme_name)
.attach_printable("Failed to add scheme name filter")?;
}
if !self.exemption_requested.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::ExemptionRequested,
&self.exemption_requested,
)
.attach_printable("Failed to add exemption requested filter")?;
}
if !self.exemption_accepted.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::ExemptionAccepted,
&self.exemption_accepted,
)
.attach_printable("Failed to add exemption accepted filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 860,
"total_crates": null
} |
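Every branch in the record above follows the same guard-then-filter shape: a dimension contributes a clause only when the caller selected at least one value for it. A small hypothetical sketch of that shape, with `add_in_clause` standing in for add_filter_in_range_clause; the real builder binds values rather than splicing strings, so this only shows the control flow:

fn add_in_clause(column: &str, values: &[String], clauses: &mut Vec<String>) {
    // Skip the dimension entirely when no values were requested, mirroring the
    // `if !self.field.is_empty()` guards above.
    if values.is_empty() {
        return;
    }
    let quoted: Vec<String> = values.iter().map(|v| format!("'{v}'")).collect();
    clauses.push(format!("{column} IN ({})", quoted.join(", ")));
}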
fn_clm_analytics_load_metrics_2442110342637045024 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/metrics
// Implementation of AuthEventMetrics for AuthEventMetric<T>
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
match self {
Self::AuthenticationCount => {
AuthenticationCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationAttemptCount => {
AuthenticationAttemptCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationSuccessCount => {
AuthenticationSuccessCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::ChallengeFlowCount => {
ChallengeFlowCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::ChallengeAttemptCount => {
ChallengeAttemptCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::ChallengeSuccessCount => {
ChallengeSuccessCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::FrictionlessFlowCount => {
FrictionlessFlowCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::FrictionlessSuccessCount => {
FrictionlessSuccessCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationErrorMessage => {
AuthenticationErrorMessage
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationFunnel => {
AuthenticationFunnel
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationExemptionApprovedCount => {
AuthenticationExemptionApprovedCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationExemptionRequestedCount => {
AuthenticationExemptionRequestedCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 302,
"total_crates": null
} |
fn_clm_analytics_try_into_-1385639417840386352 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/sankey
// Implementation of serde_json::Value for TryInto<SankeyRow>
fn try_into(self) -> Result<SankeyRow, Self::Error> {
logger::debug!("Parsing SankeyRow from {:?}", self);
serde_json::from_value(self).change_context(ParsingError::StructParseFailure(
"Failed to parse Sankey in clickhouse results",
))
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 173,
"total_crates": null
} |
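The conversion above is a thin wrapper over serde_json::from_value, with the error re-contextualised as a ParsingError. A minimal sketch of the same idea, with `ExampleRow` as a hypothetical stand-in for SankeyRow:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ExampleRow {
    count: u64,
    authentication_status: Option<String>,
}

fn parse_row(value: serde_json::Value) -> Result<ExampleRow, serde_json::Error> {
    // from_value consumes the Value and maps JSON fields onto the struct by name.
    serde_json::from_value(value)
}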
fn_clm_analytics_get_sankey_data_-1385639417840386352 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/sankey
pub async fn get_sankey_data(
clickhouse_client: &ClickhouseClient,
auth: &AuthInfo,
time_range: &TimeRange,
) -> MetricsResult<Vec<SankeyRow>> {
let mut query_builder =
QueryBuilder::<ClickhouseClient>::new(AnalyticsCollection::Authentications);
query_builder
.add_select_column(Aggregate::<String>::Count {
field: None,
alias: Some("count"),
})
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("exemption_requested")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("exemption_accepted")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("authentication_status")
.change_context(MetricsError::QueryBuildingError)?;
auth.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
time_range
.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("exemption_requested")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("exemption_accepted")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("authentication_status")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.execute_query::<SankeyRow, _>(clickhouse_client)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(Ok)
.collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 64,
"total_crates": null
} |
fn_clm_analytics_get_auth_events_filter_for_dimension_6983007128727853019 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/filters
pub async fn get_auth_events_filter_for_dimension<T>(
dimension: AuthEventDimensions,
auth: &AuthInfo,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<AuthEventFilterRow>>
where
T: AnalyticsDataSource + AuthEventFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder.set_distinct();
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.execute_query::<AuthEventFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 41,
"total_crates": null
} |
fn_clm_analytics_collect_2882725180822520582 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/accumulator
// Inherent implementation for AuthEventMetricsAccumulator
pub fn collect(self) -> AuthEventMetricsBucketValue {
AuthEventMetricsBucketValue {
authentication_count: self.authentication_count.collect(),
authentication_attempt_count: self.authentication_attempt_count.collect(),
authentication_success_count: self.authentication_success_count.collect(),
challenge_flow_count: self.challenge_flow_count.collect(),
challenge_attempt_count: self.challenge_attempt_count.collect(),
challenge_success_count: self.challenge_success_count.collect(),
frictionless_flow_count: self.frictionless_flow_count.collect(),
frictionless_success_count: self.frictionless_success_count.collect(),
error_message_count: self.authentication_error_message.collect(),
authentication_funnel: self.authentication_funnel.collect(),
authentication_exemption_approved_count: self
.authentication_exemption_approved_count
.collect(),
authentication_exemption_requested_count: self
.authentication_exemption_requested_count
.collect(),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1455,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_2882725180822520582 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/accumulator
// Implementation of AuthenticationErrorMessageAccumulator for AuthEventMetricAccumulator
fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 158,
"total_crates": null
} |
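The accumulator above merges optional counts so that the running count stays None until at least one bucket actually carried a value, after which values are summed. A tiny self-contained sketch of that merge (i64 chosen arbitrarily here), with a test:

fn merge_counts(acc: Option<i64>, incoming: Option<i64>) -> Option<i64> {
    match (acc, incoming) {
        (None, None) => None,
        (None, some @ Some(_)) | (some @ Some(_), None) => some,
        (Some(a), Some(b)) => Some(a + b),
    }
}

#[cfg(test)]
mod tests {
    use super::merge_counts;

    #[test]
    fn keeps_none_until_a_value_arrives() {
        assert_eq!(merge_counts(None, None), None);
        assert_eq!(merge_counts(None, Some(3)), Some(3));
        assert_eq!(merge_counts(Some(2), Some(3)), Some(5));
    }
}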
fn_clm_analytics_load_metrics_6333942758765572780 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/metrics/authentication_exemption_approved_count
// Implementation of AuthenticationExemptionApprovedCount for super::AuthEventMetric<T>
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
query_builder
.add_filter_clause(AuthEventDimensions::ExemptionAccepted, true)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 414,
"total_crates": null
} |
fn_clm_analytics_load_metrics_3138220418621922525 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/auth_events/metrics/frictionless_flow_count
// Implementation of FrictionlessFlowCount for super::AuthEventMetric<T>
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
query_builder
.add_filter_clause(
"authentication_type",
DecoupledAuthenticationType::Frictionless,
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 414,
"total_crates": null
} |