id
stringlengths
11
116
type
stringclasses
1 value
granularity
stringclasses
4 values
content
stringlengths
16
477k
metadata
dict
fn_clm_analytics_load_metrics_-9162997386531381378
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/frictionless_success_count // Implementation of FrictionlessSuccessCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Frictionless, ) .switch()?; query_builder .add_filter_clause("authentication_status", AuthenticationStatus::Success) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 418, "total_crates": null }
fn_clm_analytics_load_metrics_-780782593422436975
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/challenge_flow_count // Implementation of ChallengeFlowCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Challenge, ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 414, "total_crates": null }
fn_clm_analytics_load_metrics_-190216560516095011
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/authentication_exemption_requested_count // Implementation of AuthenticationExemptionRequestedCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause(AuthEventDimensions::ExemptionRequested, true) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 414, "total_crates": null }
fn_clm_analytics_load_metrics_-8004873262888702802
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/authentication_count // Implementation of AuthenticationCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 410, "total_crates": null }
fn_clm_analytics_load_metrics_-2936641531506899221
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/challenge_attempt_count // Implementation of ChallengeAttemptCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Challenge, ) .switch()?; query_builder .add_filter_in_range_clause( "authentication_status", &[AuthenticationStatus::Success, AuthenticationStatus::Failed], ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 418, "total_crates": null }
fn_clm_analytics_load_metrics_3436077045117221457
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/authentication_attempt_count // Implementation of AuthenticationAttemptCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_in_range_clause( "authentication_status", &[AuthenticationStatus::Success, AuthenticationStatus::Failed], ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 414, "total_crates": null }
fn_clm_analytics_load_metrics_5724897189185675564
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/authentication_success_count // Implementation of AuthenticationSuccessCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause("authentication_status", AuthenticationStatus::Success) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 414, "total_crates": null }
fn_clm_analytics_load_metrics_6905015998359067001
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/authentication_error_message // Implementation of AuthenticationErrorMessage for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column("sum(sign_flag) AS count") .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause("authentication_status", AuthenticationStatus::Failed) .switch()?; query_builder .add_custom_filter_clause( AuthEventDimensions::ErrorMessage, "NULL", FilterTypes::IsNotNull, ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_order_by_clause("count", Order::Descending) .attach_printable("Error adding order by clause") .switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? 
.change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 424, "total_crates": null }
fn_clm_analytics_load_metrics_54185466634052942
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/authentication_funnel // Implementation of AuthenticationFunnel for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_custom_filter_clause( AuthEventDimensions::TransactionStatus, "NULL", FilterTypes::IsNotNull, ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 414, "total_crates": null }
fn_clm_analytics_load_metrics_3270800265985043035
clm
function
// ChallengeSuccessCount metric.
//
// Counts authentications that completed a Challenge flow successfully:
// filters `authentication_status = Success` AND
// `authentication_type = Challenge`, then applies the caller's filters,
// time range and auth scope, grouping by the requested dimensions (plus the
// granularity bucket when one is supplied). Selected aggregates are
// COUNT(*) as `count` and MIN/MAX(created_at) as the bucket time bounds.
//
// Post-processing keys each row by an `AuthEventMetricsBucketIdentifier`,
// clipping the bucket's time range to the granularity window when set.
//
// NOTE(review): clause emission appears to follow call order — keep the
// select/filter/group-by sequence below intact.
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/auth_events/metrics/challenge_success_count // Implementation of ChallengeSuccessCount for super::AuthEventMetric<T> async fn load_metrics( &self, auth: &AuthInfo, dimensions: &[AuthEventDimensions], filters: &AuthEventFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Authentications); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; query_builder .add_filter_clause("authentication_status", AuthenticationStatus::Success) .switch()?; query_builder .add_filter_clause( "authentication_type", DecoupledAuthenticationType::Challenge, ) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<AuthEventMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( AuthEventMetricsBucketIdentifier::new( i.authentication_status.as_ref().map(|i| i.0), i.trans_status.as_ref().map(|i| i.0.clone()), i.authentication_type.as_ref().map(|i| i.0), i.error_message.clone(), i.authentication_connector.as_ref().map(|i| i.0), i.message_version.clone(), i.acs_reference_number.clone(), i.mcc.clone(), i.currency.as_ref().map(|i| i.0), i.merchant_country.clone(), i.billing_country.clone(), i.shipping_country.clone(), i.issuer_country.clone(), i.earliest_supported_version.clone(), i.latest_supported_version.clone(), i.whitelist_decision, i.device_manufacturer.clone(), i.device_type.clone(), i.device_brand.clone(), i.device_os.clone(), i.device_display.clone(), i.browser_name.clone(), i.browser_version.clone(), i.issuer_id.clone(), i.scheme_name.clone(), i.exemption_requested, i.exemption_accepted, TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 418, "total_crates": null }
fn_clm_analytics_get_metrics_-7151100927792465341
clm
function
// Entry point for the refunds metrics API.
//
// For every requested metric (and the optional distribution) this spawns a
// task on a `tokio::task::JoinSet` that runs the query against the analytics
// provider; `auth`/`req` are cloned per task because `JoinSet` requires
// 'static futures. Results are folded into a per-bucket
// `RefundMetricsAccumulator` keyed by `RefundMetricsBucketIdentifier`.
//
// After all tasks complete it derives the response:
//   * overall success rate = successful_refunds * 100 / total_refunds
//     (None when total is 0),
//   * per-bucket USD conversion of the processed amount when exchange rates
//     are supplied (amount is i64-converted, converted via `convert`, then
//     scaled by 100 and truncated to u64; conversion failures are logged and
//     yield None for that bucket),
//   * running totals for processed amount/count and reason/error-message
//     counts, reported in `meta_data`.
//
// NOTE(review): `total_refund_processed_amount_in_usd` accumulates 0 for
// buckets whose conversion failed, so the metadata total silently under-counts
// on conversion errors — behavior kept as-is here.
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/core pub async fn get_metrics( pool: &AnalyticsProvider, ex_rates: &Option<ExchangeRates>, auth: &AuthInfo, req: GetRefundMetricRequest, ) -> AnalyticsResult<RefundsMetricsResponse<RefundMetricsBucketResponse>> { let mut metrics_accumulator: HashMap<RefundMetricsBucketIdentifier, RefundMetricsAccumulator> = HashMap::new(); let mut set = tokio::task::JoinSet::new(); for metric_type in req.metrics.iter().cloned() { let req = req.clone(); let pool = pool.clone(); let task_span = tracing::debug_span!( "analytics_refund_query", refund_metric = metric_type.as_ref() ); // Currently JoinSet works with only static lifetime references even if the task pool does not outlive the given reference // We can optimize away this clone once that is fixed let auth_scoped = auth.to_owned(); set.spawn( async move { let data = pool .get_refund_metrics( &metric_type, &req.group_by_names.clone(), &auth_scoped, &req.filters, req.time_series.map(|t| t.granularity), &req.time_range, ) .await .change_context(AnalyticsError::UnknownError); TaskType::MetricTask(metric_type, data) } .instrument(task_span), ); } if let Some(distribution) = req.clone().distribution { let req = req.clone(); let pool = pool.clone(); let task_span = tracing::debug_span!( "analytics_refunds_distribution_query", refund_distribution = distribution.distribution_for.as_ref() ); let auth_scoped = auth.to_owned(); set.spawn( async move { let data = pool .get_refund_distribution( &distribution, &req.group_by_names.clone(), &auth_scoped, &req.filters, &req.time_series.map(|t| t.granularity), &req.time_range, ) .await .change_context(AnalyticsError::UnknownError); TaskType::DistributionTask(distribution.distribution_for, data) } .instrument(task_span), ); } while let Some(task_type) = set .join_next() .await .transpose() .change_context(AnalyticsError::UnknownError)? 
{ match task_type { TaskType::MetricTask(metric, data) => { let data = data?; let attributes = router_env::metric_attributes!( ("metric_type", metric.to_string()), ("source", pool.to_string()), ); let value = u64::try_from(data.len()); if let Ok(val) = value { metrics::BUCKETS_FETCHED.record(val, attributes); logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val); } for (id, value) in data { logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}"); let metrics_builder = metrics_accumulator.entry(id).or_default(); match metric { RefundMetrics::RefundSuccessRate | RefundMetrics::SessionizedRefundSuccessRate => metrics_builder .refund_success_rate .add_metrics_bucket(&value), RefundMetrics::RefundCount | RefundMetrics::SessionizedRefundCount => { metrics_builder.refund_count.add_metrics_bucket(&value) } RefundMetrics::RefundSuccessCount | RefundMetrics::SessionizedRefundSuccessCount => { metrics_builder.refund_success.add_metrics_bucket(&value) } RefundMetrics::RefundProcessedAmount | RefundMetrics::SessionizedRefundProcessedAmount => { metrics_builder.processed_amount.add_metrics_bucket(&value) } RefundMetrics::SessionizedRefundReason => { metrics_builder.refund_reason.add_metrics_bucket(&value) } RefundMetrics::SessionizedRefundErrorMessage => metrics_builder .refund_error_message .add_metrics_bucket(&value), } } logger::debug!( "Analytics Accumulated Results: metric: {}, results: {:#?}", metric, metrics_accumulator ); } TaskType::DistributionTask(distribution, data) => { let data = data?; let attributes = router_env::metric_attributes!( ("distribution_type", distribution.to_string()), ("source", pool.to_string()), ); let value = u64::try_from(data.len()); if let Ok(val) = value { metrics::BUCKETS_FETCHED.record(val, attributes); logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val); } for (id, value) in data { logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for distribution 
{distribution}"); let metrics_builder = metrics_accumulator.entry(id).or_default(); match distribution { RefundDistributions::SessionizedRefundReason => metrics_builder .refund_reason_distribution .add_distribution_bucket(&value), RefundDistributions::SessionizedRefundErrorMessage => metrics_builder .refund_error_message_distribution .add_distribution_bucket(&value), } } logger::debug!( "Analytics Accumulated Results: distribution: {}, results: {:#?}", distribution, metrics_accumulator ); } } } let mut success = 0; let mut total = 0; let mut total_refund_processed_amount = 0; let mut total_refund_processed_amount_in_usd = 0; let mut total_refund_processed_count = 0; let mut total_refund_reason_count = 0; let mut total_refund_error_message_count = 0; let query_data: Vec<RefundMetricsBucketResponse> = metrics_accumulator .into_iter() .map(|(id, val)| { let mut collected_values = val.collect(); if let Some(success_count) = collected_values.successful_refunds { success += success_count; } if let Some(total_count) = collected_values.total_refunds { total += total_count; } if let Some(amount) = collected_values.refund_processed_amount { let amount_in_usd = if let Some(ex_rates) = ex_rates { id.currency .and_then(|currency| { i64::try_from(amount) .inspect_err(|e| logger::error!("Amount conversion error: {:?}", e)) .ok() .and_then(|amount_i64| { convert(ex_rates, currency, Currency::USD, amount_i64) .inspect_err(|e| { logger::error!("Currency conversion error: {:?}", e) }) .ok() }) }) .map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64()) .unwrap_or_default() } else { None }; collected_values.refund_processed_amount_in_usd = amount_in_usd; total_refund_processed_amount += amount; total_refund_processed_amount_in_usd += amount_in_usd.unwrap_or(0); } if let Some(count) = collected_values.refund_processed_count { total_refund_processed_count += count; } if let Some(total_count) = collected_values.refund_reason_count { total_refund_reason_count += total_count; 
} if let Some(total_count) = collected_values.refund_error_message_count { total_refund_error_message_count += total_count; } RefundMetricsBucketResponse { values: collected_values, dimensions: id, } }) .collect(); let total_refund_success_rate = match (success, total) { (s, t) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)), _ => None, }; Ok(RefundsMetricsResponse { query_data, meta_data: [RefundsAnalyticsMetadata { total_refund_success_rate, total_refund_processed_amount: Some(total_refund_processed_amount), total_refund_processed_amount_in_usd: if ex_rates.is_some() { Some(total_refund_processed_amount_in_usd) } else { None }, total_refund_processed_count: Some(total_refund_processed_count), total_refund_reason_count: Some(total_refund_reason_count), total_refund_error_message_count: Some(total_refund_error_message_count), }], }) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 196, "total_crates": null }
fn_clm_analytics_get_filters_-7151100927792465341
clm
function
// Entry point for the refund filters API.
//
// For each requested dimension, fetches its distinct values from the
// configured provider. For the Combined* providers the query is issued to
// BOTH ClickHouse and Postgres; mismatching result sets are logged as errors
// and the provider named in the variant wins (CombinedCkh returns the
// ClickHouse result, CombinedSqlx the Postgres one).
//
// Rows are then flattened to plain strings per dimension — enum-typed
// columns via `as_ref().to_string()`, optional string columns passed
// through — dropping rows where that dimension's value is absent.
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/core pub async fn get_filters( pool: &AnalyticsProvider, req: GetRefundFilterRequest, auth: &AuthInfo, ) -> AnalyticsResult<RefundFiltersResponse> { let mut res = RefundFiltersResponse::default(); for dim in req.group_by_names { let values = match pool { AnalyticsProvider::Sqlx(pool) => { get_refund_filter_for_dimension(dim, auth, &req.time_range, pool) .await } AnalyticsProvider::Clickhouse(pool) => { get_refund_filter_for_dimension(dim, auth, &req.time_range, pool) .await } AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => { let ckh_result = get_refund_filter_for_dimension( dim, auth, &req.time_range, ckh_pool, ) .await; let sqlx_result = get_refund_filter_for_dimension( dim, auth, &req.time_range, sqlx_pool, ) .await; match (&sqlx_result, &ckh_result) { (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics filters") }, _ => {} }; ckh_result } AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => { let ckh_result = get_refund_filter_for_dimension( dim, auth, &req.time_range, ckh_pool, ) .await; let sqlx_result = get_refund_filter_for_dimension( dim, auth, &req.time_range, sqlx_pool, ) .await; match (&sqlx_result, &ckh_result) { (Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => { router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres refunds analytics filters") }, _ => {} }; sqlx_result } } .change_context(AnalyticsError::UnknownError)? 
.into_iter() .filter_map(|fil: RefundFilterRow| match dim { RefundDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()), RefundDimensions::RefundStatus => fil.refund_status.map(|i| i.as_ref().to_string()), RefundDimensions::Connector => fil.connector, RefundDimensions::RefundType => fil.refund_type.map(|i| i.as_ref().to_string()), RefundDimensions::ProfileId => fil.profile_id, RefundDimensions::RefundReason => fil.refund_reason, RefundDimensions::RefundErrorMessage => fil.refund_error_message, }) .collect::<Vec<String>>(); res.query_data.push(RefundFilterValue { dimension: dim, values, }) } Ok(res) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 98, "total_crates": null }
fn_clm_analytics_load_distribution_-1526325090867365561
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/distribution // Implementation of RefundDistributions for RefundDistribution<T> async fn load_distribution( &self, distribution: &RefundDistributionBody, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: &Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>> { match self { Self::SessionizedRefundReason => { sessionized_distribution::RefundReason .load_distribution( distribution, dimensions, auth, filters, granularity, time_range, pool, ) .await } Self::SessionizedRefundErrorMessage => { sessionized_distribution::RefundErrorMessage .load_distribution( distribution, dimensions, auth, filters, granularity, time_range, pool, ) .await } } }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 33, "total_crates": null }
fn_clm_analytics_set_filter_clause_-396587211870234607
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/types // Implementation of RefundFilters for QueryFilter<T> fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> { if !self.currency.is_empty() { builder .add_filter_in_range_clause(RefundDimensions::Currency, &self.currency) .attach_printable("Error adding currency filter")?; } if !self.refund_status.is_empty() { builder .add_filter_in_range_clause(RefundDimensions::RefundStatus, &self.refund_status) .attach_printable("Error adding refund status filter")?; } if !self.connector.is_empty() { builder .add_filter_in_range_clause(RefundDimensions::Connector, &self.connector) .attach_printable("Error adding connector filter")?; } if !self.refund_type.is_empty() { builder .add_filter_in_range_clause(RefundDimensions::RefundType, &self.refund_type) .attach_printable("Error adding auth type filter")?; } if !self.profile_id.is_empty() { builder .add_filter_in_range_clause(RefundDimensions::ProfileId, &self.profile_id) .attach_printable("Error adding profile id filter")?; } if !self.refund_reason.is_empty() { builder .add_filter_in_range_clause(RefundDimensions::RefundReason, &self.refund_reason) .attach_printable("Error adding refund reason filter")?; } if !self.refund_error_message.is_empty() { builder .add_filter_in_range_clause( RefundDimensions::RefundErrorMessage, &self.refund_error_message, ) .attach_printable("Error adding refund error message filter")?; } Ok(()) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 740, "total_crates": null }
fn_clm_analytics_load_metrics_-232845382255102512
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics // Implementation of RefundMetrics for RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> { match self { Self::RefundSuccessRate => { RefundSuccessRate::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::RefundCount => { RefundCount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::RefundSuccessCount => { RefundSuccessCount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::RefundProcessedAmount => { RefundProcessedAmount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedRefundSuccessRate => { sessionized_metrics::RefundSuccessRate::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedRefundCount => { sessionized_metrics::RefundCount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedRefundSuccessCount => { sessionized_metrics::RefundSuccessCount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedRefundProcessedAmount => { sessionized_metrics::RefundProcessedAmount::default() .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedRefundReason => { sessionized_metrics::RefundReason .load_metrics(dimensions, auth, filters, granularity, time_range, pool) .await } Self::SessionizedRefundErrorMessage => { sessionized_metrics::RefundErrorMessage .load_metrics(dimensions, auth, filters, granularity, 
time_range, pool) .await } } }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 314, "total_crates": null }
fn_clm_analytics_get_refund_filter_for_dimension_-8225957180231090501
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/filters pub async fn get_refund_filter_for_dimension<T>( dimension: RefundDimensions, auth: &AuthInfo, time_range: &TimeRange, pool: &T, ) -> FiltersResult<Vec<RefundFilterRow>> where T: AnalyticsDataSource + RefundFilterAnalytics, PrimitiveDateTime: ToSql<T>, AnalyticsCollection: ToSql<T>, Granularity: GroupByClause<T>, Aggregate<&'static str>: ToSql<T>, Window<&'static str>: ToSql<T>, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Refund); query_builder.add_select_column(dimension).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; auth.set_filter_clause(&mut query_builder).switch()?; query_builder.set_distinct(); query_builder .execute_query::<RefundFilterRow, _>(pool) .await .change_context(FiltersError::QueryBuildingError)? .change_context(FiltersError::QueryExecutionFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 50, "total_crates": null }
fn_clm_analytics_collect_7986708608635366497
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/accumulator // Inherent implementation for RefundMetricsAccumulator pub fn collect(self) -> RefundMetricsBucketValue { let (successful_refunds, total_refunds, refund_success_rate) = self.refund_success_rate.collect(); let (refund_processed_amount, refund_processed_count, refund_processed_amount_in_usd) = self.processed_amount.collect(); RefundMetricsBucketValue { successful_refunds, total_refunds, refund_success_rate, refund_count: self.refund_count.collect(), refund_success_count: self.refund_success.collect(), refund_processed_amount, refund_processed_amount_in_usd, refund_processed_count, refund_reason_distribution: self.refund_reason_distribution.collect(), refund_error_message_distribution: self.refund_error_message_distribution.collect(), refund_reason_count: self.refund_reason.collect(), refund_error_message_count: self.refund_error_message.collect(), } }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 1447, "total_crates": null }
fn_clm_analytics_add_metrics_bucket_7986708608635366497
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/accumulator // Implementation of RefundReasonAccumulator for RefundMetricAccumulator fn add_metrics_bucket(&mut self, metrics: &RefundMetricRow) { if let Some(count) = metrics.count { if let Ok(count_u64) = u64::try_from(count) { self.count += count_u64; } } }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 160, "total_crates": null }
fn_clm_analytics_add_distribution_bucket_7986708608635366497
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/accumulator // Implementation of RefundErrorMessageDistributionAccumulator for RefundDistributionAccumulator fn add_distribution_bucket(&mut self, distribution: &RefundDistributionRow) { self.refund_error_message_vec .push(RefundErrorMessageDistributionRow { count: distribution.count.unwrap_or_default(), total: distribution .total .clone() .map(|i| i.to_i64().unwrap_or_default()) .unwrap_or_default(), refund_error_message: distribution .refund_error_message .clone() .unwrap_or_default(), }) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 35, "total_crates": null }
fn_clm_analytics_load_metrics_5948522343907465843
clm
function
// RefundProcessedAmount metric.
//
// Sums `refund_amount` (aliased `total`) over successful refunds
// (`refund_status = Success`), selecting and additionally grouping by
// `currency` so amounts are never summed across currencies. MIN/MAX of
// `created_at` provide the bucket time bounds, and granularity (when set)
// adds its own group-by and clips each bucket's time range in
// post-processing.
//
// The bucket identifier deliberately passes None for refund_status since
// the status is fixed to Success by the filter.
// NOTE(review): the builder appears to emit clauses in call order; the
// status filter is added after the group-bys here and that ordering is
// preserved.
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/refund_processed_amount // Implementation of RefundProcessedAmount for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Refund); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Sum { field: "refund_amount", alias: Some("total"), }) .switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } query_builder.add_group_by_clause("currency").switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Success, ) .switch()?; query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 376, "total_crates": null }
fn_clm_analytics_load_metrics_-4154443065001131132
clm
function
// RefundSuccessRate metric.
//
// Counts refunds grouped by the caller's dimensions PLUS `RefundStatus`
// (appended to a local copy of the dimension list), so downstream
// accumulators can derive the success rate from per-status counts. Selects
// COUNT(*) as `count` and MIN/MAX(created_at) as the bucket time bounds;
// filters, auth scope and time range are applied, and granularity (when
// set) adds its own group-by and clips each bucket's time range in
// post-processing.
//
// The bucket identifier passes None for refund_status on purpose: rows of
// all statuses for the same logical bucket must collapse into one
// identifier so the rate can be computed across them.
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/refund_success_rate // Implementation of RefundSuccessRate for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::Refund); let mut dimensions = dimensions.to_vec(); dimensions.push(RefundDimensions::RefundStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 366, "total_crates": null }
fn_clm_analytics_load_metrics_-2924461315285889809
clm
function
// RefundSuccessCount metric.
//
// Counts refunds restricted to `refund_status = Success`, grouped by the
// caller's dimensions (and granularity buckets when supplied). Selects
// COUNT(*) as `count` and MIN/MAX(created_at) as bucket time bounds;
// filters, auth scope and time range are applied before grouping.
//
// The bucket identifier passes None for refund_status since the status is
// fixed to Success by the filter. When a granularity is set, each bucket's
// time range is clipped to the granularity window in post-processing.
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/refund_success_count // Implementation of RefundSuccessCount for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::Refund); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Success, ) .switch()?; query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 366, "total_crates": null }
fn_clm_analytics_load_metrics_4417963076686332458
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/refund_count // Implementation of RefundCount for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Refund); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.refund_status.as_ref().map(|i| i.0.to_string()), i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 374, "total_crates": null }
fn_clm_analytics_load_metrics_-2336598209497982415
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_processed_amount // Implementation of RefundProcessedAmount for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Sum { field: "refund_amount", alias: Some("total"), }) .switch()?; query_builder.add_select_column("currency").switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } query_builder.add_group_by_clause("currency").switch()?; if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Success, ) .switch()?; query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? 
.change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 380, "total_crates": null }
fn_clm_analytics_load_metrics_3261572122544114000
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_success_rate // Implementation of RefundSuccessRate for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::RefundSessionized); let mut dimensions = dimensions.to_vec(); dimensions.push(RefundDimensions::RefundStatus); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 366, "total_crates": null }
fn_clm_analytics_load_metrics_-6260478608631028523
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_error_message // Implementation of RefundErrorMessage for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> { let mut inner_query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); inner_query_builder .add_select_column("sum(sign_flag)") .switch()?; inner_query_builder .add_custom_filter_clause( RefundDimensions::RefundErrorMessage, "NULL", FilterTypes::IsNotNull, ) .switch()?; time_range .set_filter_clause(&mut inner_query_builder) .attach_printable("Error filtering time range for inner query") .switch()?; let inner_query_string = inner_query_builder .build_query() .attach_printable("Error building inner query") .change_context(MetricsError::QueryBuildingError)?; let mut outer_query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { outer_query_builder.add_select_column(dim).switch()?; } outer_query_builder .add_select_column("sum(sign_flag) AS count") .switch()?; outer_query_builder .add_select_column(format!("({inner_query_string}) AS total")) .switch()?; outer_query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; outer_query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters .set_filter_clause(&mut outer_query_builder) .switch()?; auth.set_filter_clause(&mut outer_query_builder).switch()?; time_range .set_filter_clause(&mut outer_query_builder) .attach_printable("Error filtering time range for outer query") .switch()?; outer_query_builder 
.add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Failure, ) .switch()?; outer_query_builder .add_custom_filter_clause( RefundDimensions::RefundErrorMessage, "NULL", FilterTypes::IsNotNull, ) .switch()?; for dim in dimensions.iter() { outer_query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut outer_query_builder) .attach_printable("Error adding granularity") .switch()?; } outer_query_builder .add_order_by_clause("count", Order::Descending) .attach_printable("Error adding order by clause") .switch()?; let filtered_dimensions: Vec<&RefundDimensions> = dimensions .iter() .filter(|&&dim| dim != RefundDimensions::RefundErrorMessage) .collect(); for dim in &filtered_dimensions { outer_query_builder .add_order_by_clause(*dim, Order::Ascending) .attach_printable("Error adding order by clause") .switch()?; } outer_query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 420, "total_crates": null }
fn_clm_analytics_load_metrics_8831689426357913419
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_success_count // Implementation of RefundSuccessCount for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> where T: AnalyticsDataSource + super::RefundMetricAnalytics, { let mut query_builder = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range.set_filter_clause(&mut query_builder).switch()?; for dim in dimensions.iter() { query_builder.add_group_by_clause(dim).switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .switch()?; } query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Success, ) .switch()?; query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 366, "total_crates": null }
fn_clm_analytics_load_metrics_-5571547427465954808
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_reason // Implementation of RefundReason for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> { let mut inner_query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); inner_query_builder .add_select_column("sum(sign_flag)") .switch()?; inner_query_builder .add_custom_filter_clause( RefundDimensions::RefundReason, "NULL", FilterTypes::IsNotNull, ) .switch()?; time_range .set_filter_clause(&mut inner_query_builder) .attach_printable("Error filtering time range for inner query") .switch()?; let inner_query_string = inner_query_builder .build_query() .attach_printable("Error building inner query") .change_context(MetricsError::QueryBuildingError)?; let mut outer_query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { outer_query_builder.add_select_column(dim).switch()?; } outer_query_builder .add_select_column("sum(sign_flag) AS count") .switch()?; outer_query_builder .add_select_column(format!("({inner_query_string}) AS total")) .switch()?; outer_query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; outer_query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters .set_filter_clause(&mut outer_query_builder) .switch()?; auth.set_filter_clause(&mut outer_query_builder).switch()?; time_range .set_filter_clause(&mut outer_query_builder) .attach_printable("Error filtering time range for outer query") .switch()?; outer_query_builder .add_custom_filter_clause( 
RefundDimensions::RefundReason, "NULL", FilterTypes::IsNotNull, ) .switch()?; for dim in dimensions.iter() { outer_query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut outer_query_builder) .attach_printable("Error adding granularity") .switch()?; } outer_query_builder .add_order_by_clause("count", Order::Descending) .attach_printable("Error adding order by clause") .switch()?; let filtered_dimensions: Vec<&RefundDimensions> = dimensions .iter() .filter(|&&dim| dim != RefundDimensions::RefundReason) .collect(); for dim in &filtered_dimensions { outer_query_builder .add_order_by_clause(*dim, Order::Ascending) .attach_printable("Error adding order by clause") .switch()?; } outer_query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), None, i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 416, "total_crates": null }
fn_clm_analytics_load_metrics_-4794854965856731502
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/metrics/sessionized_metrics/refund_count // Implementation of RefundCount for super::RefundMetric<T> async fn load_metrics( &self, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<HashSet<(RefundMetricsBucketIdentifier, RefundMetricRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } if let Some(granularity) = granularity { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } query_builder .execute_query::<RefundMetricRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? 
.into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.refund_status.as_ref().map(|i| i.0.to_string()), i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 374, "total_crates": null }
fn_clm_analytics_load_distribution_-6616504785278885636
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/distribution/sessionized_distribution/refund_error_message // Implementation of RefundErrorMessage for RefundDistribution<T> async fn load_distribution( &self, distribution: &RefundDistributionBody, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: &Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(&distribution.distribution_for) .switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; query_builder .add_filter_clause( RefundDimensions::RefundStatus, storage_enums::RefundStatus::Failure, ) .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause(&distribution.distribution_for) .attach_printable("Error grouping by distribution_for") .switch()?; if let Some(granularity) = granularity.as_ref() { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } for dim in dimensions.iter() { 
query_builder.add_outer_select_column(dim).switch()?; } query_builder .add_outer_select_column(&distribution.distribution_for) .switch()?; query_builder.add_outer_select_column("count").switch()?; query_builder .add_outer_select_column("start_bucket") .switch()?; query_builder .add_outer_select_column("end_bucket") .switch()?; let sql_dimensions = query_builder.transform_to_sql_values(dimensions).switch()?; query_builder .add_outer_select_column(Window::Sum { field: "count", partition_by: Some(sql_dimensions), order_by: None, alias: Some("total"), }) .switch()?; query_builder .add_top_n_clause( dimensions, distribution.distribution_cardinality.into(), "count", Order::Descending, ) .switch()?; query_builder .execute_query::<RefundDistributionRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.refund_status.as_ref().map(|i| i.0.to_string()), i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 177, "total_crates": null }
fn_clm_analytics_load_distribution_5409451445516504159
clm
function
// Repository: hyperswitch // Crate: analytics // Purpose: Event logging with Kafka and ClickHouse // Module: crates/analytics/src/refunds/distribution/sessionized_distribution/refund_reason // Implementation of RefundReason for RefundDistribution<T> async fn load_distribution( &self, distribution: &RefundDistributionBody, dimensions: &[RefundDimensions], auth: &AuthInfo, filters: &RefundFilters, granularity: &Option<Granularity>, time_range: &TimeRange, pool: &T, ) -> MetricsResult<Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>> { let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RefundSessionized); for dim in dimensions.iter() { query_builder.add_select_column(dim).switch()?; } query_builder .add_select_column(&distribution.distribution_for) .switch()?; query_builder .add_select_column(Aggregate::Count { field: None, alias: Some("count"), }) .switch()?; query_builder .add_select_column(Aggregate::Min { field: "created_at", alias: Some("start_bucket"), }) .switch()?; query_builder .add_select_column(Aggregate::Max { field: "created_at", alias: Some("end_bucket"), }) .switch()?; filters.set_filter_clause(&mut query_builder).switch()?; auth.set_filter_clause(&mut query_builder).switch()?; time_range .set_filter_clause(&mut query_builder) .attach_printable("Error filtering time range") .switch()?; for dim in dimensions.iter() { query_builder .add_group_by_clause(dim) .attach_printable("Error grouping by dimensions") .switch()?; } query_builder .add_group_by_clause(&distribution.distribution_for) .attach_printable("Error grouping by distribution_for") .switch()?; if let Some(granularity) = granularity.as_ref() { granularity .set_group_by_clause(&mut query_builder) .attach_printable("Error adding granularity") .switch()?; } for dim in dimensions.iter() { query_builder.add_outer_select_column(dim).switch()?; } query_builder .add_outer_select_column(&distribution.distribution_for) .switch()?; 
query_builder.add_outer_select_column("count").switch()?; query_builder .add_outer_select_column("start_bucket") .switch()?; query_builder .add_outer_select_column("end_bucket") .switch()?; let sql_dimensions = query_builder.transform_to_sql_values(dimensions).switch()?; query_builder .add_outer_select_column(Window::Sum { field: "count", partition_by: Some(sql_dimensions), order_by: None, alias: Some("total"), }) .switch()?; query_builder .add_top_n_clause( dimensions, distribution.distribution_cardinality.into(), "count", Order::Descending, ) .switch()?; query_builder .execute_query::<RefundDistributionRow, _>(pool) .await .change_context(MetricsError::QueryBuildingError)? .change_context(MetricsError::QueryExecutionFailure)? .into_iter() .map(|i| { Ok(( RefundMetricsBucketIdentifier::new( i.currency.as_ref().map(|i| i.0), i.refund_status.as_ref().map(|i| i.0.to_string()), i.connector.clone(), i.refund_type.as_ref().map(|i| i.0.to_string()), i.profile_id.clone(), i.refund_reason.clone(), i.refund_error_message.clone(), TimeRange { start_time: match (granularity, i.start_bucket) { (Some(g), Some(st)) => g.clip_to_start(st)?, _ => time_range.start_time, }, end_time: granularity.as_ref().map_or_else( || Ok(time_range.end_time), |g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(), )?, }, ), i, )) }) .collect::<error_stack::Result< Vec<(RefundMetricsBucketIdentifier, RefundDistributionRow)>, crate::query::PostProcessingError, >>() .change_context(MetricsError::PostProcessingFailure) }
{ "crate": "analytics", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 173, "total_crates": null }
fn_clm_hyperswitch_interfaces_default_-4258213000834126872
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/types // Implementation of Proxy for Default fn default() -> Self { Self { http_url: Default::default(), https_url: Default::default(), idle_pool_connection_timeout: Some(90), bypass_proxy_hosts: Default::default(), mitm_ca_certificate: None, mitm_enabled: None, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 7709, "total_crates": null }
fn_clm_hyperswitch_interfaces_default_2194152278416359693
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/configs // Implementation of GlobalTenant for Default fn default() -> Self { Self { tenant_id: id_type::TenantId::get_default_global_tenant_id(), schema: String::from("global"), redis_key_prefix: String::from("global"), clickhouse_database: String::from("global"), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 7711, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_connector_account_details_2194152278416359693
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/configs // Inherent implementation for MerchantConnectorAccountType pub fn get_connector_account_details(&self) -> serde_json::Value { match self { Self::DbVal(val) => val.connector_account_details.peek().to_owned(), Self::CacheVal(val) => val.connector_account_details.peek().to_owned(), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 146, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_metadata_2194152278416359693
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/configs // Inherent implementation for MerchantConnectorAccountType pub fn get_metadata(&self) -> Option<Secret<serde_json::Value>> { match self { Self::DbVal(val) => val.metadata.to_owned(), Self::CacheVal(val) => val.metadata.to_owned(), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 115, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_connector_wallets_details_2194152278416359693
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/configs // Inherent implementation for MerchantConnectorAccountType pub fn get_connector_wallets_details(&self) -> Option<Secret<serde_json::Value>> { match self { Self::DbVal(val) => val.connector_wallets_details.as_deref().cloned(), Self::CacheVal(_) => None, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 64, "total_crates": null }
fn_clm_hyperswitch_interfaces_is_test_mode_on_2194152278416359693
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/configs // Inherent implementation for MerchantConnectorAccountType pub fn is_test_mode_on(&self) -> Option<bool> { None }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 57, "total_crates": null }
fn_clm_hyperswitch_interfaces_from_7837160282639914106
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/disputes // Implementation of DisputePayload for From<DisputeSyncResponse> fn from(dispute_sync_data: DisputeSyncResponse) -> Self { Self { amount: dispute_sync_data.amount, currency: dispute_sync_data.currency, dispute_stage: dispute_sync_data.dispute_stage, connector_status: dispute_sync_data.connector_status, connector_dispute_id: dispute_sync_data.connector_dispute_id, connector_reason: dispute_sync_data.connector_reason, connector_reason_code: dispute_sync_data.connector_reason_code, challenge_required_by: dispute_sync_data.challenge_required_by, created_at: dispute_sync_data.created_at, updated_at: dispute_sync_data.updated_at, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 2600, "total_crates": null }
fn_clm_hyperswitch_interfaces_from_old_router_data_4687288070451708130
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/conversion_impls // Implementation of ExternalVaultProxyFlowData for RouterDataConversion<T, Req, Resp> fn from_old_router_data( old_router_data: &RouterData<T, Req, Resp>, ) -> CustomResult<RouterDataV2<T, Self, Req, Resp>, ConnectorError> where Self: Sized, { let resource_common_data = Self { merchant_id: old_router_data.merchant_id.clone(), customer_id: old_router_data.customer_id.clone(), connector_customer: old_router_data.connector_customer.clone(), payment_id: old_router_data.payment_id.clone(), attempt_id: old_router_data.attempt_id.clone(), status: old_router_data.status, payment_method: old_router_data.payment_method, description: old_router_data.description.clone(), address: old_router_data.address.clone(), auth_type: old_router_data.auth_type, connector_meta_data: old_router_data.connector_meta_data.clone(), amount_captured: old_router_data.amount_captured, minor_amount_captured: old_router_data.minor_amount_captured, access_token: old_router_data.access_token.clone(), session_token: old_router_data.session_token.clone(), reference_id: old_router_data.reference_id.clone(), payment_method_token: old_router_data.payment_method_token.clone(), recurring_mandate_payment_data: old_router_data.recurring_mandate_payment_data.clone(), preprocessing_id: old_router_data.preprocessing_id.clone(), payment_method_balance: old_router_data.payment_method_balance.clone(), connector_api_version: old_router_data.connector_api_version.clone(), connector_request_reference_id: old_router_data.connector_request_reference_id.clone(), test_mode: old_router_data.test_mode, connector_http_status_code: old_router_data.connector_http_status_code, external_latency: old_router_data.external_latency, apple_pay_flow: old_router_data.apple_pay_flow.clone(), connector_response: old_router_data.connector_response.clone(), 
payment_method_status: old_router_data.payment_method_status, }; Ok(RouterDataV2 { flow: std::marker::PhantomData, tenant_id: old_router_data.tenant_id.clone(), resource_common_data, connector_auth_type: old_router_data.connector_auth_type.clone(), request: old_router_data.request.clone(), response: old_router_data.response.clone(), }) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 60, "total_crates": null }
fn_clm_hyperswitch_interfaces_to_old_router_data_4687288070451708130
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/conversion_impls // Implementation of ExternalVaultProxyFlowData for RouterDataConversion<T, Req, Resp> fn to_old_router_data( new_router_data: RouterDataV2<T, Self, Req, Resp>, ) -> CustomResult<RouterData<T, Req, Resp>, ConnectorError> where Self: Sized, { let Self { merchant_id, customer_id, connector_customer, payment_id, attempt_id, status, payment_method, description, address, auth_type, connector_meta_data, amount_captured, minor_amount_captured, access_token, session_token, reference_id, payment_method_token, recurring_mandate_payment_data, preprocessing_id, payment_method_balance, connector_api_version, connector_request_reference_id, test_mode, connector_http_status_code, external_latency, apple_pay_flow, connector_response, payment_method_status, } = new_router_data.resource_common_data; let mut router_data = get_default_router_data( new_router_data.tenant_id.clone(), "external vault proxy", new_router_data.request, new_router_data.response, ); router_data.merchant_id = merchant_id; router_data.customer_id = customer_id; router_data.connector_customer = connector_customer; router_data.payment_id = payment_id; router_data.attempt_id = attempt_id; router_data.status = status; router_data.payment_method = payment_method; router_data.description = description; router_data.address = address; router_data.auth_type = auth_type; router_data.connector_meta_data = connector_meta_data; router_data.amount_captured = amount_captured; router_data.minor_amount_captured = minor_amount_captured; router_data.access_token = access_token; router_data.session_token = session_token; router_data.reference_id = reference_id; router_data.payment_method_token = payment_method_token; router_data.recurring_mandate_payment_data = recurring_mandate_payment_data; router_data.preprocessing_id = preprocessing_id; 
router_data.payment_method_balance = payment_method_balance; router_data.connector_api_version = connector_api_version; router_data.connector_request_reference_id = connector_request_reference_id; router_data.test_mode = test_mode; router_data.connector_http_status_code = connector_http_status_code; router_data.external_latency = external_latency; router_data.apple_pay_flow = apple_pay_flow; router_data.connector_response = connector_response; router_data.payment_method_status = payment_method_status; Ok(router_data) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 51, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_default_router_data_4687288070451708130
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/conversion_impls fn get_default_router_data<F, Req, Resp>( tenant_id: id_type::TenantId, flow_name: &str, request: Req, response: Result<Resp, router_data::ErrorResponse>, ) -> RouterData<F, Req, Resp> { RouterData { tenant_id, flow: std::marker::PhantomData, merchant_id: id_type::MerchantId::get_irrelevant_merchant_id(), customer_id: None, connector_customer: None, connector: get_irrelevant_id_string("connector", flow_name), payment_id: id_type::PaymentId::get_irrelevant_id(flow_name) .get_string_repr() .to_owned(), attempt_id: get_irrelevant_id_string("attempt_id", flow_name), status: common_enums::AttemptStatus::default(), payment_method: common_enums::PaymentMethod::default(), connector_auth_type: router_data::ConnectorAuthType::default(), description: None, address: PaymentAddress::default(), auth_type: common_enums::AuthenticationType::default(), connector_meta_data: None, connector_wallets_details: None, amount_captured: None, access_token: None, session_token: None, reference_id: None, payment_method_token: None, recurring_mandate_payment_data: None, preprocessing_id: None, payment_method_balance: None, connector_api_version: None, request, response, connector_request_reference_id: get_irrelevant_id_string( "connector_request_reference_id", flow_name, ), #[cfg(feature = "payouts")] payout_method_data: None, #[cfg(feature = "payouts")] quote_id: None, test_mode: None, connector_http_status_code: None, external_latency: None, apple_pay_flow: None, frm_metadata: None, dispute_id: None, refund_id: None, connector_response: None, payment_method_status: None, minor_amount_captured: None, integrity_check: Ok(()), additional_merchant_data: None, header_payload: None, connector_mandate_request_reference_id: None, authentication_id: None, psd2_sca_exemption_type: None, raw_connector_response: None, 
is_payment_id_from_merchant: None, payment_method_type: None, l2_l3_data: None, minor_amount_capturable: None, authorized_amount: None, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 27, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_irrelevant_id_string_4687288070451708130
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/conversion_impls fn get_irrelevant_id_string(id_name: &str, flow_name: &str) -> String { format!("irrelevant {id_name} in {flow_name} flow") }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 9, "total_crates": null }
fn_clm_hyperswitch_interfaces_handle_unified_connector_service_response_for_payment_get_-4315632138640342247
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/unified_connector_service pub fn handle_unified_connector_service_response_for_payment_get( response: payments_grpc::PaymentServiceGetResponse, ) -> UnifiedConnectorServiceResult { let status_code = transformers::convert_connector_service_status_code(response.status_code)?; let router_data_response = Result::<(PaymentsResponseData, AttemptStatus), ErrorResponse>::foreign_try_from(response)?; Ok((router_data_response, status_code)) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 20, "total_crates": null }
fn_clm_hyperswitch_interfaces_compare_-6214506328681937042
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/integrity // Implementation of CaptureIntegrityObject for FlowIntegrity fn compare( req_integrity_object: Self, res_integrity_object: Self, connector_transaction_id: Option<String>, ) -> Result<(), IntegrityCheckError> { let mut mismatched_fields = Vec::new(); res_integrity_object .capture_amount .zip(req_integrity_object.capture_amount) .map(|(res_amount, req_amount)| { if res_amount != req_amount { mismatched_fields.push(format_mismatch( "capture_amount", &req_amount.to_string(), &res_amount.to_string(), )); } }); if req_integrity_object.currency != res_integrity_object.currency { mismatched_fields.push(format_mismatch( "currency", &req_integrity_object.currency.to_string(), &res_integrity_object.currency.to_string(), )); } if mismatched_fields.is_empty() { Ok(()) } else { let field_names = mismatched_fields.join(", "); Err(IntegrityCheckError { field_names, connector_transaction_id, }) } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 37, "total_crates": null }
fn_clm_hyperswitch_interfaces_check_integrity_-6214506328681937042
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/integrity // Implementation of PaymentsSyncData for CheckIntegrity<Request, T> fn check_integrity( &self, request: &Request, connector_transaction_id: Option<String>, ) -> Result<(), IntegrityCheckError> { match request.get_response_integrity_object() { Some(res_integrity_object) => { let req_integrity_object = request.get_request_integrity_object(); T::compare( req_integrity_object, res_integrity_object, connector_transaction_id, ) } None => Ok(()), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 23, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_response_integrity_object_-6214506328681937042
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/integrity // Implementation of PaymentsSyncData for GetIntegrityObject<SyncIntegrityObject> fn get_response_integrity_object(&self) -> Option<SyncIntegrityObject> { self.integrity_object.clone() }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 13, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_request_integrity_object_-6214506328681937042
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/integrity // Implementation of PaymentsSyncData for GetIntegrityObject<SyncIntegrityObject> fn get_request_integrity_object(&self) -> SyncIntegrityObject { SyncIntegrityObject { amount: Some(self.amount), currency: Some(self.currency), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 11, "total_crates": null }
fn_clm_hyperswitch_interfaces_format_mismatch_-6214506328681937042
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/integrity fn format_mismatch(field: &str, expected: &str, found: &str) -> String { format!("{field} expected {expected} but found {found}") }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 6, "total_crates": null }
fn_clm_hyperswitch_interfaces_build_error_response_7144157403563039060
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_interface // Implementation of ConnectorEnum for ConnectorCommon fn build_error_response( &self, res: types::Response, event_builder: Option<&mut ConnectorEvent>, ) -> CustomResult<ErrorResponse, errors::ConnectorError> { match self { Self::Old(connector) => connector.build_error_response(res, event_builder), Self::New(connector) => connector.build_error_response(res, event_builder), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 435, "total_crates": null }
fn_clm_hyperswitch_interfaces_common_get_content_type_7144157403563039060
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_interface // Implementation of ConnectorEnum for ConnectorCommon fn common_get_content_type(&self) -> &'static str { match self { Self::Old(connector) => connector.common_get_content_type(), Self::New(connector) => connector.common_get_content_type(), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 381, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_connector_integration_7144157403563039060
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_interface // Inherent implementation for ConnectorEnum /// Get the connector integration /// /// # Returns /// /// A `BoxedConnectorIntegrationInterface` containing the connector integration pub fn get_connector_integration<F, ResourceCommonData, Req, Resp>( &self, ) -> BoxedConnectorIntegrationInterface<F, ResourceCommonData, Req, Resp> where dyn Connector + Sync: ConnectorIntegration<F, Req, Resp>, dyn ConnectorV2 + Sync: ConnectorIntegrationV2<F, ResourceCommonData, Req, Resp>, ResourceCommonData: RouterDataConversion<F, Req, Resp> + Clone + 'static, F: Clone + 'static, Req: Clone + 'static, Resp: Clone + 'static, { match self { Self::Old(old_integration) => Box::new(ConnectorIntegrationEnum::Old( old_integration.get_connector_integration(), )), Self::New(new_integration) => Box::new(ConnectorIntegrationEnum::New( new_integration.get_connector_integration_v2(), )), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 369, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_auth_header_7144157403563039060
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_interface // Implementation of ConnectorEnum for ConnectorCommon fn get_auth_header( &self, auth_type: &ConnectorAuthType, ) -> CustomResult<Vec<(String, masking::Maskable<String>)>, errors::ConnectorError> { match self { Self::Old(connector) => connector.get_auth_header(auth_type), Self::New(connector) => connector.get_auth_header(auth_type), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 276, "total_crates": null }
fn_clm_hyperswitch_interfaces_id_7144157403563039060
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_interface // Implementation of ConnectorEnum for ConnectorCommon fn id(&self) -> &'static str { match self { Self::Old(connector) => connector.id(), Self::New(connector) => connector.id(), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 231, "total_crates": null }
fn_clm_hyperswitch_interfaces_from_-789150134206428872
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/webhooks // Implementation of IncomingWebhookFlowError for From<&ApiErrorResponse> fn from(api_error_response: &ApiErrorResponse) -> Self { match api_error_response { ApiErrorResponse::WebhookResourceNotFound | ApiErrorResponse::DisputeNotFound { .. } | ApiErrorResponse::PayoutNotFound | ApiErrorResponse::MandateNotFound | ApiErrorResponse::PaymentNotFound | ApiErrorResponse::RefundNotFound | ApiErrorResponse::AuthenticationNotFound { .. } => Self::ResourceNotFound, _ => Self::InternalError, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 2600, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_webhook_source_verification_merchant_secret_-789150134206428872
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/webhooks /// fn get_webhook_source_verification_merchant_secret async fn get_webhook_source_verification_merchant_secret( &self, merchant_id: &common_utils::id_type::MerchantId, connector_name: &str, connector_webhook_details: Option<common_utils::pii::SecretSerdeValue>, ) -> CustomResult<api_models::webhooks::ConnectorWebhookSecrets, errors::ConnectorError> { let debug_suffix = format!("For merchant_id: {merchant_id:?}, and connector_name: {connector_name}"); let default_secret = "default_secret".to_string(); let merchant_secret = match connector_webhook_details { Some(merchant_connector_webhook_details) => { let connector_webhook_details = merchant_connector_webhook_details .parse_value::<api_models::admin::MerchantConnectorWebhookDetails>( "MerchantConnectorWebhookDetails", ) .change_context_lazy(|| errors::ConnectorError::WebhookSourceVerificationFailed) .attach_printable_lazy(|| { format!( "Deserializing MerchantConnectorWebhookDetails failed {debug_suffix}", ) })?; api_models::webhooks::ConnectorWebhookSecrets { secret: connector_webhook_details .merchant_secret .expose() .into_bytes(), additional_secret: connector_webhook_details.additional_secret, } } None => api_models::webhooks::ConnectorWebhookSecrets { secret: default_secret.into_bytes(), additional_secret: None, }, }; //need to fetch merchant secret from config table with caching in future for enhanced performance //If merchant has not set the secret for webhook source verification, "default_secret" is returned. //So it will fail during verification step and goes to psync flow. Ok(merchant_secret) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 81, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_webhook_source_verification_signature_-789150134206428872
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/webhooks /// fn get_webhook_source_verification_signature fn get_webhook_source_verification_signature( &self, _request: &IncomingWebhookRequestDetails<'_>, _connector_webhook_secrets: &api_models::webhooks::ConnectorWebhookSecrets, ) -> CustomResult<Vec<u8>, errors::ConnectorError> { Ok(Vec::new()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 56, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_webhook_source_verification_message_-789150134206428872
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/webhooks /// fn get_webhook_source_verification_message fn get_webhook_source_verification_message( &self, _request: &IncomingWebhookRequestDetails<'_>, _merchant_id: &common_utils::id_type::MerchantId, _connector_webhook_secrets: &api_models::webhooks::ConnectorWebhookSecrets, ) -> CustomResult<Vec<u8>, errors::ConnectorError> { Ok(Vec::new()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 44, "total_crates": null }
fn_clm_hyperswitch_interfaces_verify_webhook_source_-789150134206428872
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/webhooks /// fn verify_webhook_source async fn verify_webhook_source( &self, request: &IncomingWebhookRequestDetails<'_>, merchant_id: &common_utils::id_type::MerchantId, connector_webhook_details: Option<common_utils::pii::SecretSerdeValue>, _connector_account_details: crypto::Encryptable<Secret<serde_json::Value>>, connector_name: &str, ) -> CustomResult<bool, errors::ConnectorError> { let algorithm = self .get_webhook_source_verification_algorithm(request) .change_context(errors::ConnectorError::WebhookSourceVerificationFailed)?; let connector_webhook_secrets = self .get_webhook_source_verification_merchant_secret( merchant_id, connector_name, connector_webhook_details, ) .await .change_context(errors::ConnectorError::WebhookSourceVerificationFailed)?; let signature = self .get_webhook_source_verification_signature(request, &connector_webhook_secrets) .change_context(errors::ConnectorError::WebhookSourceVerificationFailed)?; let message = self .get_webhook_source_verification_message( request, merchant_id, &connector_webhook_secrets, ) .change_context(errors::ConnectorError::WebhookSourceVerificationFailed)?; algorithm .verify_signature(&connector_webhook_secrets.secret, &signature, &message) .change_context(errors::ConnectorError::WebhookSourceVerificationFailed) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 35, "total_crates": null }
fn_clm_hyperswitch_interfaces_switch_5068353074430346642
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/errors // Implementation of HttpClientError for ErrorSwitch<ApiClientError> fn switch(&self) -> ApiClientError { match self { Self::HeaderMapConstructionFailed => ApiClientError::HeaderMapConstructionFailed, Self::InvalidProxyConfiguration => ApiClientError::InvalidProxyConfiguration, Self::ClientConstructionFailed => ApiClientError::ClientConstructionFailed, Self::CertificateDecodeFailed => ApiClientError::CertificateDecodeFailed, Self::BodySerializationFailed => ApiClientError::BodySerializationFailed, Self::UnexpectedState => ApiClientError::UnexpectedState, Self::UrlParsingFailed => ApiClientError::UrlParsingFailed, Self::UrlEncodingFailed => ApiClientError::UrlEncodingFailed, Self::RequestNotSent(reason) => ApiClientError::RequestNotSent(reason.clone()), Self::ResponseDecodingFailed => ApiClientError::ResponseDecodingFailed, Self::RequestTimeoutReceived => ApiClientError::RequestTimeoutReceived, Self::ConnectionClosedIncompleteMessage => { ApiClientError::ConnectionClosedIncompleteMessage } Self::InternalServerErrorReceived => ApiClientError::InternalServerErrorReceived, Self::BadGatewayReceived => ApiClientError::BadGatewayReceived, Self::ServiceUnavailableReceived => ApiClientError::ServiceUnavailableReceived, Self::GatewayTimeoutReceived => ApiClientError::GatewayTimeoutReceived, Self::UnexpectedServerResponse => ApiClientError::UnexpectedServerResponse, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 3084, "total_crates": null }
fn_clm_hyperswitch_interfaces_is_connector_timeout_5068353074430346642
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/errors // Inherent implementation for ConnectorError /// fn is_connector_timeout pub fn is_connector_timeout(&self) -> bool { self == &Self::RequestTimeoutReceived }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 27, "total_crates": null }
fn_clm_hyperswitch_interfaces_execute_connector_processing_step_1671320860412774063
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api_client pub async fn execute_connector_processing_step< 'b, 'a, T, ResourceCommonData: Clone + RouterDataConversion<T, Req, Resp> + 'static, Req: Debug + Clone + 'static, Resp: Debug + Clone + 'static, >( state: &dyn ApiClientWrapper, connector_integration: BoxedConnectorIntegrationInterface<T, ResourceCommonData, Req, Resp>, req: &'b RouterData<T, Req, Resp>, call_connector_action: common_enums::CallConnectorAction, connector_request: Option<Request>, return_raw_connector_response: Option<bool>, ) -> CustomResult<RouterData<T, Req, Resp>, ConnectorError> where T: Clone + Debug + 'static, // BoxedConnectorIntegration<T, Req, Resp>: 'b, { // If needed add an error stack as follows // connector_integration.build_request(req).attach_printable("Failed to build request"); tracing::Span::current().record("connector_name", &req.connector); tracing::Span::current().record("payment_method", req.payment_method.to_string()); logger::debug!(connector_request=?connector_request); let mut router_data = req.clone(); match call_connector_action { common_enums::CallConnectorAction::HandleResponse(res) => { let response = types::Response { headers: None, response: res.into(), status_code: 200, }; connector_integration.handle_response(req, None, response) } common_enums::CallConnectorAction::UCSHandleResponse(transform_data_bytes) => { handle_ucs_response(router_data, transform_data_bytes) } common_enums::CallConnectorAction::Avoid => Ok(router_data), common_enums::CallConnectorAction::StatusUpdate { status, error_code, error_message, } => { router_data.status = status; let error_response = if error_code.is_some() | error_message.is_some() { Some(ErrorResponse { code: error_code.unwrap_or(consts::NO_ERROR_CODE.to_string()), message: error_message.unwrap_or(consts::NO_ERROR_MESSAGE.to_string()), status_code: 200, // This 
status code is ignored in redirection response it will override with 302 status code. reason: None, attempt_status: None, connector_transaction_id: None, network_advice_code: None, network_decline_code: None, network_error_message: None, connector_metadata: None, }) } else { None }; router_data.response = error_response.map(Err).unwrap_or(router_data.response); Ok(router_data) } common_enums::CallConnectorAction::Trigger => { metrics::CONNECTOR_CALL_COUNT.add( 1, router_env::metric_attributes!( ("connector", req.connector.to_string()), ( "flow", get_flow_name::<T>().unwrap_or_else(|_| "UnknownFlow".to_string()) ), ), ); let connector_request = match connector_request { Some(connector_request) => Some(connector_request), None => connector_integration .build_request(req, &state.get_connectors()) .inspect_err(|error| { if matches!( error.current_context(), &ConnectorError::RequestEncodingFailed | &ConnectorError::RequestEncodingFailedWithReason(_) ) { metrics::REQUEST_BUILD_FAILURE.add( 1, router_env::metric_attributes!(( "connector", req.connector.clone() )), ) } })?, }; match connector_request { Some(mut request) => { let masked_request_body = match &request.body { Some(request) => match request { RequestContent::Json(i) | RequestContent::FormUrlEncoded(i) | RequestContent::Xml(i) => i .masked_serialize() .unwrap_or(json!({ "error": "failed to mask serialize"})), RequestContent::FormData((_, i)) => i .masked_serialize() .unwrap_or(json!({ "error": "failed to mask serialize"})), RequestContent::RawBytes(_) => json!({"request_type": "RAW_BYTES"}), }, None => serde_json::Value::Null, }; let flow_name = get_flow_name::<T>().unwrap_or_else(|_| "UnknownFlow".to_string()); request.headers.insert(( X_FLOW_NAME.to_string(), Maskable::Masked(masking::Secret::new(flow_name.to_string())), )); let connector_name = req.connector.clone(); request.headers.insert(( X_CONNECTOR_NAME.to_string(), Maskable::Masked(masking::Secret::new(connector_name.clone().to_string())), )); 
state.get_request_id().as_ref().map(|id| { let request_id = id.to_string(); request.headers.insert(( X_REQUEST_ID.to_string(), Maskable::Normal(request_id.clone()), )); request_id }); let request_url = request.url.clone(); let request_method = request.method; let current_time = Instant::now(); let response = call_connector_api(state, request, "execute_connector_processing_step") .await; let external_latency = current_time.elapsed().as_millis(); logger::info!(raw_connector_request=?masked_request_body); let status_code = response .as_ref() .map(|i| { i.as_ref() .map_or_else(|value| value.status_code, |value| value.status_code) }) .unwrap_or_default(); let mut connector_event = ConnectorEvent::new( state.get_tenant().tenant_id.clone(), req.connector.clone(), std::any::type_name::<T>(), masked_request_body, request_url, request_method, req.payment_id.clone(), req.merchant_id.clone(), state.get_request_id().as_ref(), external_latency, req.refund_id.clone(), req.dispute_id.clone(), status_code, ); match response { Ok(body) => { let response = match body { Ok(body) => { let connector_http_status_code = Some(body.status_code); let handle_response_result = connector_integration .handle_response( req, Some(&mut connector_event), body.clone(), ) .inspect_err(|error| { if error.current_context() == &ConnectorError::ResponseDeserializationFailed { metrics::RESPONSE_DESERIALIZATION_FAILURE.add( 1, router_env::metric_attributes!(( "connector", req.connector.clone(), )), ) } }); match handle_response_result { Ok(mut data) => { state .event_handler() .log_connector_event(&connector_event); data.connector_http_status_code = connector_http_status_code; // Add up multiple external latencies in case of multiple external calls within the same request. 
data.external_latency = Some( data.external_latency .map_or(external_latency, |val| { val + external_latency }), ); store_raw_connector_response_if_required( return_raw_connector_response, &mut data, &body, )?; Ok(data) } Err(err) => { connector_event .set_error(json!({"error": err.to_string()})); state .event_handler() .log_connector_event(&connector_event); Err(err) } }? } Err(body) => { router_data.connector_http_status_code = Some(body.status_code); router_data.external_latency = Some( router_data .external_latency .map_or(external_latency, |val| val + external_latency), ); metrics::CONNECTOR_ERROR_RESPONSE_COUNT.add( 1, router_env::metric_attributes!(( "connector", req.connector.clone(), )), ); store_raw_connector_response_if_required( return_raw_connector_response, &mut router_data, &body, )?; let error = match body.status_code { 500..=511 => { let error_res = connector_integration .get_5xx_error_response( body, Some(&mut connector_event), )?; state .event_handler() .log_connector_event(&connector_event); error_res } _ => { let error_res = connector_integration .get_error_response( body, Some(&mut connector_event), )?; if let Some(status) = error_res.attempt_status { router_data.status = status; }; state .event_handler() .log_connector_event(&connector_event); error_res } }; router_data.response = Err(error); router_data } }; Ok(response) } Err(error) => { connector_event.set_error(json!({"error": error.to_string()})); state.event_handler().log_connector_event(&connector_event); if error.current_context().is_upstream_timeout() { let error_response = ErrorResponse { code: consts::REQUEST_TIMEOUT_ERROR_CODE.to_string(), message: consts::REQUEST_TIMEOUT_ERROR_MESSAGE.to_string(), reason: Some(consts::REQUEST_TIMEOUT_ERROR_MESSAGE.to_string()), status_code: 504, attempt_status: None, connector_transaction_id: None, network_advice_code: None, network_decline_code: None, network_error_message: None, connector_metadata: None, }; router_data.response = 
Err(error_response); router_data.connector_http_status_code = Some(504); router_data.external_latency = Some( router_data .external_latency .map_or(external_latency, |val| val + external_latency), ); Ok(router_data) } else { Err(error .change_context(ConnectorError::ProcessingStepFailed(None))) } } } } None => Ok(router_data), } } } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 459, "total_crates": null }
fn_clm_hyperswitch_interfaces_call_connector_api_1671320860412774063
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api_client pub async fn call_connector_api( state: &dyn ApiClientWrapper, request: Request, flow_name: &str, ) -> CustomResult<Result<types::Response, types::Response>, ApiClientError> { let current_time = Instant::now(); let headers = request.headers.clone(); let url = request.url.clone(); let response = state .get_api_client() .send_request(state, request, None, true) .await; match response.as_ref() { Ok(resp) => { let status_code = resp.status().as_u16(); let elapsed_time = current_time.elapsed(); logger::info!( ?headers, url, status_code, flow=?flow_name, ?elapsed_time ); } Err(err) => { logger::info!( call_connector_api_error=?err ); } } handle_response(response).await }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 93, "total_crates": null }
fn_clm_hyperswitch_interfaces_handle_ucs_response_1671320860412774063
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api_client /// Handle UCS webhook response processing pub fn handle_ucs_response<T, Req, Resp>( router_data: RouterData<T, Req, Resp>, transform_data_bytes: Vec<u8>, ) -> CustomResult<RouterData<T, Req, Resp>, ConnectorError> where T: Clone + Debug + 'static, Req: Debug + Clone + 'static, Resp: Debug + Clone + 'static, { let webhook_transform_data: unified_connector_service::WebhookTransformData = serde_json::from_slice(&transform_data_bytes) .change_context(ConnectorError::ResponseDeserializationFailed) .attach_printable("Failed to deserialize UCS webhook transform data")?; let webhook_content = webhook_transform_data .webhook_content .ok_or(ConnectorError::ResponseDeserializationFailed) .attach_printable("UCS webhook transform data missing webhook_content")?; let payment_get_response = match webhook_content.content { Some(unified_connector_service_client::payments::webhook_response_content::Content::PaymentsResponse(payments_response)) => { Ok(payments_response) }, Some(unified_connector_service_client::payments::webhook_response_content::Content::RefundsResponse(_)) => { Err(ConnectorError::ProcessingStepFailed(Some("UCS webhook contains refund response but payment processing was expected".to_string().into())).into()) }, Some(unified_connector_service_client::payments::webhook_response_content::Content::DisputesResponse(_)) => { Err(ConnectorError::ProcessingStepFailed(Some("UCS webhook contains dispute response but payment processing was expected".to_string().into())).into()) }, Some(unified_connector_service_client::payments::webhook_response_content::Content::IncompleteTransformation(_)) => { Err(ConnectorError::ProcessingStepFailed(Some("UCS webhook contains incomplete transformation but payment processing was expected".to_string().into())).into()) }, None => { 
Err(ConnectorError::ResponseDeserializationFailed) .attach_printable("UCS webhook content missing payments_response") } }?; let (router_data_response, status_code) = unified_connector_service::handle_unified_connector_service_response_for_payment_get( payment_get_response.clone(), ) .change_context(ConnectorError::ProcessingStepFailed(None)) .attach_printable("Failed to process UCS webhook response using PSync handler")?; let mut updated_router_data = router_data; let router_data_response = router_data_response.map(|(response, status)| { updated_router_data.status = status; response }); let _ = router_data_response.map_err(|error_response| { updated_router_data.response = Err(error_response); }); updated_router_data.raw_connector_response = payment_get_response .raw_connector_response .map(masking::Secret::new); updated_router_data.connector_http_status_code = Some(status_code); Ok(updated_router_data) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 65, "total_crates": null }
fn_clm_hyperswitch_interfaces_handle_response_1671320860412774063
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api_client pub async fn handle_response( response: CustomResult<reqwest::Response, ApiClientError>, ) -> CustomResult<Result<types::Response, types::Response>, ApiClientError> { response .map(|response| async { logger::info!(?response); let status_code = response.status().as_u16(); let headers = Some(response.headers().to_owned()); match status_code { 200..=202 | 302 | 204 => { // If needed add log line // logger:: error!( error_parsing_response=?err); let response = response .bytes() .await .change_context(ApiClientError::ResponseDecodingFailed) .attach_printable("Error while waiting for response")?; Ok(Ok(types::Response { headers, response, status_code, })) } status_code @ 500..=599 => { let bytes = response.bytes().await.map_err(|error| { report!(error) .change_context(ApiClientError::ResponseDecodingFailed) .attach_printable("Client error response received") })?; // let error = match status_code { // 500 => ApiClientError::InternalServerErrorReceived, // 502 => ApiClientError::BadGatewayReceived, // 503 => ApiClientError::ServiceUnavailableReceived, // 504 => ApiClientError::GatewayTimeoutReceived, // _ => ApiClientError::UnexpectedServerResponse, // }; Ok(Err(types::Response { headers, response: bytes, status_code, })) } status_code @ 400..=499 => { let bytes = response.bytes().await.map_err(|error| { report!(error) .change_context(ApiClientError::ResponseDecodingFailed) .attach_printable("Client error response received") })?; /* let error = match status_code { 400 => ApiClientError::BadRequestReceived(bytes), 401 => ApiClientError::UnauthorizedReceived(bytes), 403 => ApiClientError::ForbiddenReceived, 404 => ApiClientError::NotFoundReceived(bytes), 405 => ApiClientError::MethodNotAllowedReceived, 408 => ApiClientError::RequestTimeoutReceived, 422 => 
ApiClientError::UnprocessableEntityReceived(bytes), 429 => ApiClientError::TooManyRequestsReceived, _ => ApiClientError::UnexpectedServerResponse, }; Err(report!(error).attach_printable("Client error response received")) */ Ok(Err(types::Response { headers, response: bytes, status_code, })) } _ => Err(report!(ApiClientError::UnexpectedServerResponse) .attach_printable("Unexpected response from server")), } })? .await }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 62, "total_crates": null }
fn_clm_hyperswitch_interfaces_store_raw_connector_response_if_required_1671320860412774063
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api_client /// Store the raw connector response in the router data if required pub fn store_raw_connector_response_if_required<T, Req, Resp>( return_raw_connector_response: Option<bool>, router_data: &mut RouterData<T, Req, Resp>, body: &types::Response, ) -> CustomResult<(), ConnectorError> where T: Clone + Debug + 'static, Req: Debug + Clone + 'static, Resp: Debug + Clone + 'static, { if return_raw_connector_response == Some(true) { let mut decoded = String::from_utf8(body.response.as_ref().to_vec()) .change_context(ConnectorError::ResponseDeserializationFailed)?; if decoded.starts_with('\u{feff}') { decoded = decoded.trim_start_matches('\u{feff}').to_string(); } router_data.raw_connector_response = Some(masking::Secret::new(decoded)); } Ok(()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 32, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_url_1869506791255952584
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_v2 /// returns url fn get_url( &self, _req: &RouterDataV2<Flow, ResourceCommonData, Req, Resp>, ) -> CustomResult<String, errors::ConnectorError> { metrics::UNIMPLEMENTED_FLOW .add(1, router_env::metric_attributes!(("connector", self.id()))); Ok(String::new()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 403, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_headers_1869506791255952584
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_v2 /// returns a vec of tuple of header key and value fn get_headers( &self, _req: &RouterDataV2<Flow, ResourceCommonData, Req, Resp>, ) -> CustomResult<Vec<(String, Maskable<String>)>, errors::ConnectorError> { Ok(vec![]) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 372, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_request_body_1869506791255952584
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_v2 /// returns request body fn get_request_body( &self, _req: &RouterDataV2<Flow, ResourceCommonData, Req, Resp>, ) -> CustomResult<Option<RequestContent>, errors::ConnectorError> { Ok(None) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 285, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_http_method_1869506791255952584
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_v2 /// primarily used when creating signature based on request method of payment flow fn get_http_method(&self) -> Method { Method::Post }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 39, "total_crates": null }
fn_clm_hyperswitch_interfaces_build_request_v2_1869506791255952584
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/connector_integration_v2 /// builds the request and returns it fn build_request_v2( &self, req: &RouterDataV2<Flow, ResourceCommonData, Req, Resp>, ) -> CustomResult<Option<Request>, errors::ConnectorError> { Ok(Some( RequestBuilder::new() .method(self.get_http_method()) .url(self.get_url(req)?.as_str()) .attach_default_headers() .headers(self.get_headers(req)?) .set_optional_body(self.get_request_body(req)?) .add_certificate(self.get_certificate(req)?) .add_certificate_key(self.get_certificate_key(req)?) .build(), )) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 35, "total_crates": null }
fn_clm_hyperswitch_interfaces_build_error_response_8997027586722496145
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api /// common error response for a connector if it is same in all case fn build_error_response( &self, res: types::Response, _event_builder: Option<&mut ConnectorEvent>, ) -> CustomResult<ErrorResponse, errors::ConnectorError> { Ok(ErrorResponse { status_code: res.status_code, code: consts::NO_ERROR_CODE.to_string(), message: consts::NO_ERROR_MESSAGE.to_string(), reason: None, attempt_status: None, connector_transaction_id: None, network_advice_code: None, network_decline_code: None, network_error_message: None, connector_metadata: None, }) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 427, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_url_8997027586722496145
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api /// fn get_url fn get_url( &self, _req: &RouterData<T, Req, Resp>, _connectors: &Connectors, ) -> CustomResult<String, errors::ConnectorError> { Ok(String::new()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 401, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_headers_8997027586722496145
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api /// fn get_headers fn get_headers( &self, _req: &RouterData<T, Req, Resp>, _connectors: &Connectors, ) -> CustomResult<Vec<(String, Maskable<String>)>, errors::ConnectorError> { Ok(vec![]) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 372, "total_crates": null }
fn_clm_hyperswitch_interfaces_common_get_content_type_8997027586722496145
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api /// HTTP `Content-Type` to be used for POST requests. /// Defaults to `application/json`. fn common_get_content_type(&self) -> &'static str { "application/json" }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 369, "total_crates": null }
fn_clm_hyperswitch_interfaces_build_headers_8997027586722496145
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api /// common header builder when every request for the connector have same headers fn build_headers( &self, _req: &RouterData<Flow, Req, Resp>, _connectors: &Connectors, ) -> CustomResult<Vec<(String, Maskable<String>)>, errors::ConnectorError> { Ok(Vec::new()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 362, "total_crates": null }
fn_clm_hyperswitch_interfaces_into_inner_-8610509834670042251
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/secrets_interface/secret_state // Inherent implementation for SecretStateContainer<T, S> pub fn into_inner(self) -> T { self.inner }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 2061, "total_crates": null }
fn_clm_hyperswitch_interfaces_get_inner_-8610509834670042251
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/secrets_interface/secret_state // Inherent implementation for SecretStateContainer<T, S> pub fn get_inner(&self) -> &T { &self.inner }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 594, "total_crates": null }
fn_clm_hyperswitch_interfaces_deserialize_-8610509834670042251
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/secrets_interface/secret_state // Implementation of SecretStateContainer<T, S> for Deserialize<'de> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let val = Deserialize::deserialize(deserializer)?; Ok(Self { inner: val, marker: PhantomData, }) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 136, "total_crates": null }
fn_clm_hyperswitch_interfaces_transition_state_-8610509834670042251
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/secrets_interface/secret_state // Inherent implementation for SecretStateContainer<T, SecuredSecret> /// Transition the secret state from `SecuredSecret` to `RawSecret` pub fn transition_state( mut self, decryptor_fn: impl FnOnce(T) -> T, ) -> SecretStateContainer<T, RawSecret> { self.inner = decryptor_fn(self.inner); SecretStateContainer { inner: self.inner, marker: PhantomData, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 35, "total_crates": null }
fn_clm_hyperswitch_interfaces_foreign_try_from_5570629408270820812
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/unified_connector_service/transformers // Implementation of AttemptStatus for ForeignTryFrom<payments_grpc::PaymentStatus> fn foreign_try_from(grpc_status: payments_grpc::PaymentStatus) -> Result<Self, Self::Error> { match grpc_status { payments_grpc::PaymentStatus::Started => Ok(Self::Started), payments_grpc::PaymentStatus::AuthenticationFailed => Ok(Self::AuthenticationFailed), payments_grpc::PaymentStatus::RouterDeclined => Ok(Self::RouterDeclined), payments_grpc::PaymentStatus::AuthenticationPending => Ok(Self::AuthenticationPending), payments_grpc::PaymentStatus::AuthenticationSuccessful => { Ok(Self::AuthenticationSuccessful) } payments_grpc::PaymentStatus::Authorized => Ok(Self::Authorized), payments_grpc::PaymentStatus::AuthorizationFailed => Ok(Self::AuthorizationFailed), payments_grpc::PaymentStatus::Charged => Ok(Self::Charged), payments_grpc::PaymentStatus::Authorizing => Ok(Self::Authorizing), payments_grpc::PaymentStatus::CodInitiated => Ok(Self::CodInitiated), payments_grpc::PaymentStatus::Voided => Ok(Self::Voided), payments_grpc::PaymentStatus::VoidInitiated => Ok(Self::VoidInitiated), payments_grpc::PaymentStatus::CaptureInitiated => Ok(Self::CaptureInitiated), payments_grpc::PaymentStatus::CaptureFailed => Ok(Self::CaptureFailed), payments_grpc::PaymentStatus::VoidFailed => Ok(Self::VoidFailed), payments_grpc::PaymentStatus::AutoRefunded => Ok(Self::AutoRefunded), payments_grpc::PaymentStatus::PartialCharged => Ok(Self::PartialCharged), payments_grpc::PaymentStatus::PartialChargedAndChargeable => { Ok(Self::PartialChargedAndChargeable) } payments_grpc::PaymentStatus::Unresolved => Ok(Self::Unresolved), payments_grpc::PaymentStatus::Pending => Ok(Self::Pending), payments_grpc::PaymentStatus::Failure => Ok(Self::Failure), payments_grpc::PaymentStatus::PaymentMethodAwaited => 
Ok(Self::PaymentMethodAwaited), payments_grpc::PaymentStatus::ConfirmationAwaited => Ok(Self::ConfirmationAwaited), payments_grpc::PaymentStatus::DeviceDataCollectionPending => { Ok(Self::DeviceDataCollectionPending) } payments_grpc::PaymentStatus::AttemptStatusUnspecified => Ok(Self::Unresolved), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 428, "total_crates": null }
fn_clm_hyperswitch_interfaces_convert_connector_service_status_code_5570629408270820812
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/unified_connector_service/transformers pub fn convert_connector_service_status_code( status_code: u32, ) -> Result<u16, error_stack::Report<UnifiedConnectorServiceError>> { u16::try_from(status_code).map_err(|err| { UnifiedConnectorServiceError::RequestEncodingFailedWithReason(format!( "Failed to convert connector service status code to u16: {err}" )) .into() }) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 30, "total_crates": null }
fn_clm_hyperswitch_interfaces_validate_file_upload_7705175380174543215
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api/files /// fn validate_file_upload fn validate_file_upload( &self, _purpose: FilePurpose, _file_size: i32, _file_type: mime::Mime, ) -> common_utils::errors::CustomResult<(), errors::ConnectorError> { Err(errors::ConnectorError::FileValidationFailed { reason: "".to_owned(), } .into()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 13, "total_crates": null }
fn_clm_hyperswitch_interfaces_validate_file_upload_v2_6056184559657617353
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/api/files_v2 /// fn validate_file_upload_v2 fn validate_file_upload_v2( &self, _purpose: FilePurpose, _file_size: i32, _file_type: mime::Mime, ) -> common_utils::errors::CustomResult<(), errors::ConnectorError> { Err(errors::ConnectorError::FileValidationFailed { reason: "".to_owned(), } .into()) }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 7, "total_crates": null }
fn_clm_hyperswitch_interfaces_new_4533979817207268970
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/routing_api_logs // Inherent implementation for RoutingEvent pub fn new( tenant_id: common_utils::id_type::TenantId, routable_connectors: String, flow: &str, request: serde_json::Value, url: String, method: ApiMethod, payment_id: String, profile_id: common_utils::id_type::ProfileId, merchant_id: common_utils::id_type::MerchantId, request_id: Option<RequestId>, routing_engine: RoutingEngine, ) -> Self { Self { tenant_id, routable_connectors, flow: flow.to_string(), request: request.to_string(), response: None, error: None, url, method: method.to_string(), payment_id, profile_id, merchant_id, created_at: OffsetDateTime::now_utc().unix_timestamp_nanos(), status_code: None, request_id: request_id .map(|i| i.as_hyphenated().to_string()) .unwrap_or("NO_REQUEST_ID".to_string()), routing_engine, payment_connector: None, routing_approach: None, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 14483, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_response_body_4533979817207268970
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/routing_api_logs // Inherent implementation for RoutingEvent /// fn set_response_body pub fn set_response_body<T: Serialize>(&mut self, response: &T) { match masking::masked_serialize(response) { Ok(masked) => { self.response = Some(masked.to_string()); } Err(er) => self.set_error(json!({"error": er.to_string()})), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 666, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_error_response_body_4533979817207268970
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/routing_api_logs // Inherent implementation for RoutingEvent /// fn set_error_response_body pub fn set_error_response_body<T: Serialize>(&mut self, response: &T) { match masking::masked_serialize(response) { Ok(masked) => { self.error = Some(masked.to_string()); } Err(er) => self.set_error(json!({"error": er.to_string()})), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 219, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_error_4533979817207268970
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/routing_api_logs // Inherent implementation for RoutingEvent /// fn set_error pub fn set_error(&mut self, error: serde_json::Value) { self.error = Some(error.to_string()); }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 110, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_status_code_4533979817207268970
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/routing_api_logs // Inherent implementation for RoutingEvent /// set response status code pub fn set_status_code(&mut self, code: u16) { self.status_code = Some(code); }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 45, "total_crates": null }
fn_clm_hyperswitch_interfaces_new_-5204710062499041806
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/connector_api_logs // Inherent implementation for ConnectorEvent pub fn new( tenant_id: common_utils::id_type::TenantId, connector_name: String, flow: &str, request: serde_json::Value, url: String, method: Method, payment_id: String, merchant_id: common_utils::id_type::MerchantId, request_id: Option<&RequestId>, latency: u128, refund_id: Option<String>, dispute_id: Option<String>, status_code: u16, ) -> Self { Self { tenant_id, connector_name, flow: flow .rsplit_once("::") .map(|(_, s)| s) .unwrap_or(flow) .to_string(), request: request.to_string(), masked_response: None, error: None, url, method: method.to_string(), payment_id, merchant_id, created_at: OffsetDateTime::now_utc().unix_timestamp_nanos() / 1_000_000, request_id: request_id .map(|i| i.as_hyphenated().to_string()) .unwrap_or("NO_REQUEST_ID".to_string()), latency, refund_id, dispute_id, status_code, } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 14489, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_response_body_-5204710062499041806
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/connector_api_logs // Inherent implementation for ConnectorEvent /// fn set_response_body pub fn set_response_body<T: Serialize>(&mut self, response: &T) { match masking::masked_serialize(response) { Ok(masked) => { self.masked_response = Some(masked.to_string()); } Err(er) => self.set_error(json!({"error": er.to_string()})), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 666, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_error_response_body_-5204710062499041806
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/connector_api_logs // Inherent implementation for ConnectorEvent /// fn set_error_response_body pub fn set_error_response_body<T: Serialize>(&mut self, response: &T) { match masking::masked_serialize(response) { Ok(masked) => { self.error = Some(masked.to_string()); } Err(er) => self.set_error(json!({"error": er.to_string()})), } }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 219, "total_crates": null }
fn_clm_hyperswitch_interfaces_set_error_-5204710062499041806
clm
function
// Repository: hyperswitch // Crate: hyperswitch_interfaces // Purpose: Trait definitions for connectors and services // Module: crates/hyperswitch_interfaces/src/events/connector_api_logs // Inherent implementation for ConnectorEvent /// fn set_error pub fn set_error(&mut self, error: serde_json::Value) { self.error = Some(error.to_string()); }
{ "crate": "hyperswitch_interfaces", "file": null, "file_size": null, "is_async": false, "is_pub": true, "num_enums": null, "num_structs": null, "num_tables": null, "score": 110, "total_crates": null }
fn_clm_router_main_6888976958380394518
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/build fn main() { // Set thread stack size to 10 MiB for debug builds // Reference: https://doc.rust-lang.org/std/thread/#stack-size #[cfg(debug_assertions)] println!("cargo:rustc-env=RUST_MIN_STACK=10485760"); // 10 * 1024 * 1024 = 10 MiB #[cfg(feature = "vergen")] router_env::vergen::generate_cargo_instructions(); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 2, "total_crates": null }
fn_clm_router_invalidate_existing_cache_success_6859943756975655481
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/cache async fn invalidate_existing_cache_success() { // Arrange Box::pin(utils::setup()).await; let (tx, _) = tokio::sync::oneshot::channel(); let app_state = Box::pin(routes::AppState::new( Settings::default(), tx, Box::new(services::MockApiClient), )) .await; let state = Arc::new(app_state) .get_session_state( &common_utils::id_type::TenantId::try_from_string("public".to_string()).unwrap(), None, || {}, ) .unwrap(); let cache_key = "cacheKey".to_string(); let cache_key_value = "val".to_string(); let _ = state .store .get_redis_conn() .unwrap() .set_key(&cache_key.clone().into(), cache_key_value.clone()) .await; let api_key = ("api-key", "test_admin"); let client = awc::Client::default(); cache::CONFIG_CACHE .push( CacheKey { key: cache_key.clone(), prefix: String::default(), }, cache_key_value.clone(), ) .await; cache::ACCOUNTS_CACHE .push( CacheKey { key: cache_key.clone(), prefix: String::default(), }, cache_key_value.clone(), ) .await; // Act let mut response = client .post(format!( "http://127.0.0.1:8080/cache/invalidate/{cache_key}" )) .insert_header(api_key) .send() .await .unwrap(); // Assert let response_body = response.body().await; println!("invalidate Cache: {response:?} : {response_body:?}"); assert_eq!(response.status(), awc::http::StatusCode::OK); assert!(cache::CONFIG_CACHE .get_val::<String>(CacheKey { key: cache_key.clone(), prefix: String::default() }) .await .is_none()); assert!(cache::ACCOUNTS_CACHE .get_val::<String>(CacheKey { key: cache_key, prefix: String::default() }) .await .is_none()); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 62, "total_crates": null }
fn_clm_router_invalidate_non_existing_cache_success_6859943756975655481
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/cache async fn invalidate_non_existing_cache_success() { // Arrange Box::pin(utils::setup()).await; let cache_key = "cacheKey".to_string(); let api_key = ("api-key", "test_admin"); let client = awc::Client::default(); // Act let mut response = client .post(format!( "http://127.0.0.1:8080/cache/invalidate/{cache_key}" )) .insert_header(api_key) .send() .await .unwrap(); // Assert let response_body = response.body().await; println!("invalidate Cache: {response:?} : {response_body:?}"); assert_eq!(response.status(), awc::http::StatusCode::NOT_FOUND); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 16, "total_crates": null }
fn_clm_router_create_merchant_account_2823902949902393105
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/integration_demo async fn create_merchant_account() { let server = Box::pin(mk_service()).await; let client = AppClient::guest(); let admin_client = client.admin("test_admin"); let expected = "merchant_12345"; let expected_merchant_id_type = common_utils::id_type::MerchantId::try_from(std::borrow::Cow::from("merchant_12345")) .unwrap(); let hlist_pat![merchant_id, _api_key]: HList![MerchantId, ApiKey] = admin_client .create_merchant_account(&server, expected.to_owned()) .await; assert_eq!(expected_merchant_id_type, *merchant_id); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 37, "total_crates": null }
fn_clm_router_partial_refund_2823902949902393105
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/integration_demo async fn partial_refund() { let authentication = ConnectorAuthentication::new(); let server = Box::pin(mk_service()).await; let client = AppClient::guest(); let admin_client = client.admin("test_admin"); let hlist_pat![merchant_id, api_key]: HList![MerchantId, ApiKey] = admin_client.create_merchant_account(&server, None).await; let _connector: serde_json::Value = admin_client .create_connector( &server, &merchant_id, "stripe", authentication.checkout.unwrap().api_key.peek(), ) .await; let user_client = client.user(&api_key); let hlist_pat![payment_id]: HList![PaymentId] = user_client.create_payment(&server, 100, 100).await; let hlist_pat![status]: HList![Status] = user_client.create_refund(&server, &payment_id, 50).await; assert_eq!(&*status, "pending"); let hlist_pat![status]: HList![Status] = user_client.create_refund(&server, &payment_id, 50).await; assert_eq!(&*status, "pending"); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 24, "total_crates": null }
fn_clm_router_exceed_refund_2823902949902393105
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/integration_demo async fn exceed_refund() { let authentication = ConnectorAuthentication::new(); let server = Box::pin(mk_service()).await; let client = AppClient::guest(); let admin_client = client.admin("test_admin"); let hlist_pat![merchant_id, api_key]: HList![MerchantId, ApiKey] = admin_client.create_merchant_account(&server, None).await; let _connector: serde_json::Value = admin_client .create_connector( &server, &merchant_id, "stripe", authentication.checkout.unwrap().api_key.peek(), ) .await; let user_client = client.user(&api_key); let hlist_pat![payment_id]: HList![PaymentId] = user_client.create_payment(&server, 100, 100).await; let hlist_pat![status]: HList![Status] = user_client.create_refund(&server, &payment_id, 50).await; assert_eq!(&*status, "pending"); let message: serde_json::Value = user_client.create_refund(&server, &payment_id, 100).await; assert_eq!( message.get("error").unwrap().get("message").unwrap(), "The refund amount exceeds the amount captured." ); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 24, "total_crates": null }
fn_clm_router_refunds_todo_4400093523496731139
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/refunds async fn refunds_todo() { Box::pin(utils::setup()).await; let client = awc::Client::default(); let mut response; let mut response_body; let get_endpoints = vec!["list"]; let post_endpoints: Vec<&str> = vec![]; for endpoint in get_endpoints { response = client .get(format!("http://127.0.0.1:8080/refunds/{endpoint}")) .send() .await .unwrap(); response_body = response.body().await; println!("{endpoint} =:= {response:?} : {response_body:?}"); assert_eq!(response.status(), awc::http::StatusCode::OK); } for endpoint in post_endpoints { response = client .post(format!("http://127.0.0.1:8080/refunds/{endpoint}")) .send() .await .unwrap(); response_body = response.body().await; println!("{endpoint} =:= {response:?} : {response_body:?}"); assert_eq!(response.status(), awc::http::StatusCode::OK); } }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 18, "total_crates": null }
fn_clm_router_refund_create_fail_stripe_4400093523496731139
clm
function
// Repository: hyperswitch // Crate: router // Purpose: Main application server handling HTTP requests, authentication, and business logic orchestration // Module: crates/router/tests/refunds async fn refund_create_fail_stripe() { let app = Box::pin(mk_service()).await; let client = AppClient::guest(); let user_client = client.user("321"); let payment_id = common_utils::id_type::PaymentId::generate_test_payment_id_for_sample_data(); let refund: serde_json::Value = user_client.create_refund(&app, &payment_id, 10).await; assert_eq!(refund.get("error").unwrap().get("message").unwrap(), "Access forbidden, invalid API key was used. Please create your new API key from the Dashboard Settings section."); }
{ "crate": "router", "file": null, "file_size": null, "is_async": false, "is_pub": false, "num_enums": null, "num_structs": null, "num_tables": null, "score": 12, "total_crates": null }