id | type | granularity | content | metadata
|---|---|---|---|---|
fn_clm_masking_into_inner_-1768448059029681441 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/maskable
// Inherent implementation for Maskable<T>
/// Get the inner data while consuming self
pub fn into_inner(self) -> T {
match self {
Self::Masked(inner_secret) => inner_secret.expose(),
Self::Normal(inner) => inner,
}
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 2063,
"total_crates": null
} |
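A minimal usage sketch (assuming the `masking` crate as a dependency and its `new_masked`/`new_normal` constructors shown elsewhere in this dump): `into_inner` consumes the wrapper and yields the raw value from either variant.

```rust
// Sketch: both Maskable variants surrender their inner value.
use masking::{Maskable, Secret};

fn main() {
    let masked: Maskable<String> = Maskable::new_masked(Secret::new("card-number".to_string()));
    let normal: Maskable<String> = Maskable::new_normal("order-id".to_string());
    assert_eq!(masked.into_inner(), "card-number");
    assert_eq!(normal.into_inner(), "order-id");
}
```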
fn_clm_masking_hash_-1768448059029681441 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/maskable
// Implementation of std::hash::Hash for Maskable<T>
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Masked(value) => crate::PeekInterface::peek(value).hash(state),
Self::Normal(value) => value.hash(state),
}
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 347,
"total_crates": null
} |
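Because this impl hashes the peeked inner value, a masked and a normal wrapper around equal data hash identically; a sketch (assuming `Maskable` and `Secret` are exported from the crate root):

```rust
// Sketch: Hash sees through the Secret, so equal inner values give
// equal hashes regardless of the variant.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

use masking::{Maskable, Secret};

fn hash_of<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let masked = Maskable::new_masked(Secret::new("token".to_string()));
    let normal = Maskable::new_normal("token".to_string());
    assert_eq!(hash_of(&masked), hash_of(&normal));
}
```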
fn_clm_masking_into_masked_-1768448059029681441 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/maskable
// Implementation of Mask for Secret<String>
fn into_masked(self) -> Maskable<Self::Output> {
Maskable::new_masked(self)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 124,
"total_crates": null
} |
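A short call-site sketch (assuming the `Mask` trait is importable from the crate root): `into_masked` is the ergonomic way to lift a `Secret<String>` into the `Masked` variant when building `Maskable<String>` header values.

```rust
// Sketch: lifting a Secret<String> into Maskable::Masked.
use masking::{Mask, Secret};

fn main() {
    let api_key = Secret::new("sk_live_123".to_string()).into_masked();
    // The Debug impl for Maskable delegates to Secret, so the raw key
    // is not printed here.
    println!("{api_key:?}");
}
```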
fn_clm_masking_fmt_-1768448059029681441 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/maskable
// Implementation of std::fmt::Debug for Maskable<T>
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Masked(secret_value) => std::fmt::Debug::fmt(secret_value, f),
Self::Normal(value) => std::fmt::Debug::fmt(value, f),
}
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 42,
"total_crates": null
} |
fn_clm_masking_deserialize_8749675277349566939 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/cassandra
// Implementation of DeserializeValue<'frame, 'metadata> for StrongSecret<T>
fn deserialize(
column_type: &'metadata ColumnType<'metadata>,
v: Option<scylla::deserialize::FrameSlice<'frame>>,
) -> Result<Self, scylla::deserialize::DeserializationError> {
Ok(Self::new(T::deserialize(column_type, v)?))
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 138,
"total_crates": null
} |
fn_clm_masking_serialize_8749675277349566939 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/cassandra
// Implementation of SerializeValue for StrongSecret<T>
fn serialize<'b>(
&self,
column_type: &ColumnType<'_>,
writer: CellWriter<'b>,
) -> Result<WrittenCellProof<'b>, SerializationError> {
self.peek().serialize(column_type, writer)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 87,
"total_crates": null
} |
fn_clm_masking_type_check_8749675277349566939 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/cassandra
// Implementation of DeserializeValue<'frame, 'metadata> for StrongSecret<T>
fn type_check(column_type: &ColumnType<'_>) -> Result<(), scylla::deserialize::TypeCheckError> {
T::type_check(column_type)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 13,
"total_crates": null
} |
fn_clm_masking_build_-4584001233183914012 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/diesel
// Implementation of Queryable<ST, DB> for StrongSecret<S, I>
fn build(row: Self::Row) -> deserialize::Result<Self> {
Ok(row)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 572,
"total_crates": null
} |
fn_clm_masking_to_sql_-4584001233183914012 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/diesel
// Implementation of ToSql<T, DB> for StrongSecret<S, I>
fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, DB>) -> serialize::Result {
ToSql::<T, DB>::to_sql(&self.inner_secret, out)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 100,
"total_crates": null
} |
fn_clm_masking_from_sql_-4584001233183914012 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/diesel
// Implementation of FromSql<T, DB> for StrongSecret<S, I>
fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result<Self> {
S::from_sql(bytes).map(|raw| raw.into())
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 56,
"total_crates": null
} |
fn_clm_masking_as_expression_-4584001233183914012 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/diesel
// Implementation of AsExpression<T> for StrongSecret<S, I>
fn as_expression(self) -> Self::Expression {
Bound::new(self)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 10,
"total_crates": null
} |
fn_clm_masking_from_str_6055384577069420048 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/string
// Implementation of FromStr for StrongSecret<String, I>
fn from_str(src: &str) -> Result<Self, Self::Err> {
Ok(Self::new(src.to_string()))
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 783,
"total_crates": null
} |
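Since the conversion never fails, `str::parse` doubles as a constructor; a sketch (assuming `StrongSecret` is exported from the crate root and that it zeroizes its buffer on drop):

```rust
// Sketch: FromStr is infallible here, so parse() is a convenient way
// to build a StrongSecret<String>.
use masking::StrongSecret;

fn main() {
    let key: StrongSecret<String> = "sk_live_123".parse().expect("conversion is infallible");
    drop(key); // assumed: the inner buffer is wiped at this point
}
```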
fn_clm_masking_new_-6778285756021183017 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/bytes
// Inherent implementation for SecretBytesMut
/// Wrap bytes in `SecretBytesMut`
pub fn new(bytes: impl Into<BytesMut>) -> Self {
Self(bytes.into())
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14467,
"total_crates": null
} |
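A usage sketch (assuming `SecretBytesMut` and `PeekInterface` are exported from the crate root): the buffer stays readable through `peek` while `Debug` output remains redacted, matching the `fmt` impl shown later in this dump.

```rust
// Sketch: wrap a BytesMut buffer; Debug is redacted, peek is not.
use bytes::BytesMut;
use masking::{PeekInterface, SecretBytesMut};

fn main() {
    let buf = SecretBytesMut::new(BytesMut::from(&b"pan=4242"[..]));
    assert_eq!(format!("{buf:?}"), "SecretBytesMut([REDACTED])");
    assert_eq!(buf.peek().len(), 8);
}
```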
fn_clm_masking_from_-6778285756021183017 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/bytes
// Implementation of From<BytesMut> for SecretBytesMut
fn from(bytes: BytesMut) -> Self {
Self::new(bytes)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 2602,
"total_crates": null
} |
fn_clm_masking_peek_-6778285756021183017 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/bytes
// Implementation of PeekInterface<BytesMut> for SecretBytesMut
fn peek(&self) -> &BytesMut {
&self.0
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1193,
"total_crates": null
} |
fn_clm_masking_deserialize_-6778285756021183017 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/bytes
// Implementation of Deserialize<'de> for SecretBytesMut
fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
struct SecretBytesVisitor;
impl<'de> de::Visitor<'de> for SecretBytesVisitor {
type Value = SecretBytesMut;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("byte array")
}
#[inline]
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
let mut bytes = BytesMut::with_capacity(v.len());
bytes.extend_from_slice(v);
Ok(SecretBytesMut(bytes))
}
#[inline]
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
where
V: de::SeqAccess<'de>,
{
// 4096 is cargo culted from upstream
let len = core::cmp::min(seq.size_hint().unwrap_or(0), 4096);
let mut bytes = BytesMut::with_capacity(len);
use bytes::BufMut;
while let Some(value) = seq.next_element()? {
bytes.put_u8(value);
}
Ok(SecretBytesMut(bytes))
}
}
deserializer.deserialize_bytes(SecretBytesVisitor)
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 160,
"total_crates": null
} |
fn_clm_masking_fmt_-6778285756021183017 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/bytes
// Implementation of fmt::Debug for SecretBytesMut
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SecretBytesMut([REDACTED])")
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 38,
"total_crates": null
} |
fn_clm_masking_clone_-5120581695744760835 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/secret
// Implementation of Clone for Secret<SecretValue, MaskingStrategy>
fn clone(&self) -> Self {
Self {
inner_secret: self.inner_secret.clone(),
masking_strategy: PhantomData,
}
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 27325,
"total_crates": null
} |
fn_clm_masking_new_-5120581695744760835 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/secret
// Inherent implementation for Secret<SecretValue, MaskingStrategy>
/// Take ownership of a secret value
pub fn new(secret: SecretValue) -> Self {
Self {
inner_secret: secret,
masking_strategy: PhantomData,
}
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14463,
"total_crates": null
} |
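A usage sketch (assuming the `PeekInterface` and `ExposeInterface` traits are exported alongside `Secret`): the value goes in through `new` and only comes back out through an explicit `peek` (borrow) or `expose` (consume).

```rust
// Sketch: explicit, auditable access points to the secret value.
use masking::{ExposeInterface, PeekInterface, Secret};

fn main() {
    let password: Secret<String> = Secret::new("hunter2".to_string());
    assert_eq!(password.peek().len(), 7); // read without consuming
    let raw: String = password.expose();  // consume to get the value
    assert_eq!(raw, "hunter2");
}
```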
fn_clm_masking_map_-5120581695744760835 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/secret
// Inherent implementation for Secret<SecretValue, MaskingStrategy>
/// Consume self and transform the inner value
pub fn map<OtherSecretValue>(
self,
f: impl FnOnce(SecretValue) -> OtherSecretValue,
) -> Secret<OtherSecretValue, MaskingStrategy>
where
MaskingStrategy: Strategy<OtherSecretValue>,
{
f(self.inner_secret).into()
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 10684,
"total_crates": null
} |
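A sketch of `map` in action (assuming the default masking strategy satisfies the `Strategy<OtherSecretValue>` bound for the output type): the closure runs on the inner value without the call site ever holding the exposed secret.

```rust
// Sketch: derive a non-sensitive property while staying wrapped.
use masking::Secret;

fn main() {
    let pan: Secret<String> = Secret::new("4242424242424242".to_string());
    let digit_count: Secret<usize> = pan.map(|s| s.len());
    // Debug printing stays redacted by the masking strategy.
    println!("{digit_count:?}");
}
```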
fn_clm_masking_default_-5120581695744760835 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/secret
// Implementation of Default for Secret<SecretValue, MaskingStrategy>
fn default() -> Self {
SecretValue::default().into()
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7707,
"total_crates": null
} |
fn_clm_masking_as_ref_-5120581695744760835 | clm | function | // Repository: hyperswitch
// Crate: masking
// Purpose: PII protection and data masking
// Module: crates/masking/src/secret
// Implementation of AsRef<[u8]> for Secret<Vec<u8>>
fn as_ref(&self) -> &[u8] {
self.peek().as_slice()
}
| {
"crate": "masking",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7557,
"total_crates": null
} |
fn_clm_injector_new_-6348947890272314470 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/injector
// Inherent implementation for Injector
pub fn new() -> Self {
Self
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14463,
"total_crates": null
} |
fn_clm_injector_default_-6348947890272314470 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/injector
// Implementation of Default for Injector
fn default() -> Self {
Self::new()
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7705,
"total_crates": null
} |
fn_clm_injector_from_-6348947890272314470 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/injector
// Implementation of From<injector_types::HttpMethod> for Method
fn from(method: injector_types::HttpMethod) -> Self {
match method {
injector_types::HttpMethod::GET => Self::Get,
injector_types::HttpMethod::POST => Self::Post,
injector_types::HttpMethod::PUT => Self::Put,
injector_types::HttpMethod::PATCH => Self::Patch,
injector_types::HttpMethod::DELETE => Self::Delete,
}
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 2600,
"total_crates": null
} |
fn_clm_injector_make_http_request_-6348947890272314470 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/injector
// Inherent implementation for Injector
async fn make_http_request(
&self,
config: &injector_types::ConnectionConfig,
payload: &str,
content_type: &ContentType,
) -> error_stack::Result<InjectorResponse, InjectorError> {
logger::info!(
method = ?config.http_method,
endpoint = %config.endpoint,
content_type = ?content_type,
payload_length = payload.len(),
headers_count = config.headers.len(),
"Making HTTP request to connector"
);
// Validate inputs first
if config.endpoint.is_empty() {
logger::error!("Endpoint URL is empty");
Err(error_stack::Report::new(InjectorError::InvalidTemplate(
"Endpoint URL cannot be empty".to_string(),
)))?;
}
// Parse and validate the complete endpoint URL
let url = reqwest::Url::parse(&config.endpoint).map_err(|e| {
logger::error!("Failed to parse endpoint URL: {}", e);
error_stack::Report::new(InjectorError::InvalidTemplate(format!(
"Invalid endpoint URL: {e}"
)))
})?;
logger::debug!("Constructed URL: {}", url);
// Convert headers to common_utils Headers format safely
let headers: Vec<(String, masking::Maskable<String>)> = config
.headers
.clone()
.into_iter()
.map(|(k, v)| (k, masking::Maskable::new_normal(v.expose().clone())))
.collect();
// Determine method and request content
let method = Method::from(config.http_method);
// Determine request content based on content type with error handling
let request_content = match content_type {
ContentType::ApplicationJson => {
// Try to parse as JSON, fallback to raw string
match serde_json::from_str::<Value>(payload) {
Ok(json) => Some(RequestContent::Json(Box::new(json))),
Err(e) => {
logger::debug!(
"Failed to parse payload as JSON: {}, falling back to raw bytes",
e
);
Some(RequestContent::RawBytes(payload.as_bytes().to_vec()))
}
}
}
ContentType::ApplicationXWwwFormUrlencoded => {
// Parse form data safely
let form_data: HashMap<String, String> =
url::form_urlencoded::parse(payload.as_bytes())
.into_owned()
.collect();
Some(RequestContent::FormUrlEncoded(Box::new(form_data)))
}
ContentType::ApplicationXml | ContentType::TextXml => {
Some(RequestContent::RawBytes(payload.as_bytes().to_vec()))
}
ContentType::TextPlain => {
Some(RequestContent::RawBytes(payload.as_bytes().to_vec()))
}
};
// Extract vault metadata directly from headers using existing functions
let (vault_proxy_url, vault_ca_cert) = if config
.headers
.contains_key(crate::consts::EXTERNAL_VAULT_METADATA_HEADER)
{
let mut temp_config = injector_types::ConnectionConfig::new(
config.endpoint.clone(),
config.http_method,
);
// Use existing vault metadata extraction with fallback
if temp_config.extract_and_apply_vault_metadata_with_fallback(&config.headers) {
(temp_config.proxy_url, temp_config.ca_cert)
} else {
(None, None)
}
} else {
(None, None)
};
// Build request safely with certificate configuration
let mut request_builder = RequestBuilder::new()
.method(method)
.url(url.as_str())
.headers(headers);
if let Some(content) = request_content {
request_builder = request_builder.set_body(content);
}
// Create final config with vault CA certificate if available
let mut final_config = config.clone();
let has_vault_ca_cert = vault_ca_cert.is_some();
if has_vault_ca_cert {
final_config.ca_cert = vault_ca_cert;
}
// Log certificate configuration (but not the actual content)
logger::info!(
has_client_cert = final_config.client_cert.is_some(),
has_client_key = final_config.client_key.is_some(),
has_ca_cert = final_config.ca_cert.is_some(),
has_vault_ca_cert = has_vault_ca_cert,
insecure = final_config.insecure.unwrap_or(false),
cert_format = ?final_config.cert_format,
"Certificate configuration applied"
);
// Build request with certificate configuration applied
let request = build_request_with_certificates(request_builder, &final_config);
// Determine which proxy to use: vault metadata > backup > none
let final_proxy_url = vault_proxy_url.or_else(|| config.backup_proxy_url.clone());
let proxy = if let Some(proxy_url) = final_proxy_url {
let proxy_url_str = proxy_url.expose();
// Set proxy URL for both HTTP and HTTPS traffic
Proxy {
http_url: Some(proxy_url_str.clone()),
https_url: Some(proxy_url_str),
idle_pool_connection_timeout: Some(90),
bypass_proxy_hosts: None,
}
} else {
Proxy::default()
};
// Send request using local standalone http client
let response = send_request(&proxy, request, None).await?;
// Convert reqwest::Response to InjectorResponse using trait
response
.into_injector_response()
.await
.map_err(|e| error_stack::Report::new(e))
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 121,
"total_crates": null
} |
fn_clm_injector_send_request_-6348947890272314470 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/injector
pub async fn send_request(
client_proxy: &Proxy,
request: common_utils::request::Request,
_option_timeout_secs: Option<u64>,
) -> error_stack::Result<reqwest::Response, InjectorError> {
logger::info!(
has_client_cert = request.certificate.is_some(),
has_client_key = request.certificate_key.is_some(),
has_ca_cert = request.ca_certificate.is_some(),
"Making HTTP request using standalone injector HTTP client with configuration"
);
// Create reqwest client using the proven create_client function
let client = create_client(
client_proxy,
request.certificate.clone(),
request.certificate_key.clone(),
request.ca_certificate.clone(),
)?;
// Build the request
let method = match request.method {
Method::Get => reqwest::Method::GET,
Method::Post => reqwest::Method::POST,
Method::Put => reqwest::Method::PUT,
Method::Patch => reqwest::Method::PATCH,
Method::Delete => reqwest::Method::DELETE,
};
let mut req_builder = client.request(method, &request.url);
// Add headers
for (key, value) in &request.headers {
let header_value = match value {
masking::Maskable::Masked(secret) => secret.clone().expose(),
masking::Maskable::Normal(normal) => normal.clone(),
};
req_builder = req_builder.header(key, header_value);
}
// Add body if present
if let Some(body) = request.body {
match body {
RequestContent::Json(payload) => {
req_builder = req_builder.json(&payload);
}
RequestContent::FormUrlEncoded(payload) => {
req_builder = req_builder.form(&payload);
}
RequestContent::RawBytes(payload) => {
req_builder = req_builder.body(payload);
}
_ => {
logger::warn!("Unsupported request content type, using raw bytes");
}
}
}
// Send the request
let response = req_builder
.send()
.await
.map_err(|e| log_and_convert_http_error(e, "send_request"))?;
logger::info!(
status_code = response.status().as_u16(),
"HTTP request completed successfully"
);
Ok(response)
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 82,
"total_crates": null
} |
fn_clm_injector_new_573252899843830129 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/types
// Inherent implementation for ConnectionConfig
/// Creates a new ConnectionConfig from basic parameters
pub fn new(endpoint: String, http_method: HttpMethod) -> Self {
Self {
endpoint,
http_method,
headers: HashMap::new(),
proxy_url: None,
backup_proxy_url: None,
client_cert: None,
client_key: None,
ca_cert: None,
insecure: None,
cert_password: None,
cert_format: None,
max_response_size: None,
}
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14465,
"total_crates": null
} |
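A construction sketch (assuming the `injector_types` re-export seen elsewhere in this dump, and the `HashMap<String, Secret<String>>` header shape used by the vault metadata extractor): all TLS and proxy fields start as `None` and can be filled in afterwards.

```rust
// Sketch: minimal ConnectionConfig with one header.
use std::collections::HashMap;

use injector_types::{ConnectionConfig, HttpMethod};
use masking::Secret;

fn main() {
    let mut config = ConnectionConfig::new(
        "https://api.example.com/v1/charges".to_string(), // placeholder endpoint
        HttpMethod::POST,
    );
    config.headers = HashMap::from([(
        "Authorization".to_string(),
        Secret::new("Bearer token123".to_string()),
    )]);
    assert!(config.proxy_url.is_none()); // no proxy until vault metadata applies one
}
```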
fn_clm_injector_into_injector_response_573252899843830129 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/types
// Implementation of IntoInjectorResponse for reqwest::Response
async fn into_injector_response(
self,
) -> Result<InjectorResponse, crate::injector::core::InjectorError> {
let status_code = self.status().as_u16();
logger::info!(
status_code = status_code,
"Converting reqwest::Response to InjectorResponse"
);
// Extract headers
let headers: Option<HashMap<String, String>> = {
let header_map: HashMap<String, String> = self
.headers()
.iter()
.filter_map(|(name, value)| {
value
.to_str()
.ok()
.map(|v| (name.to_string(), v.to_string()))
})
.collect();
if header_map.is_empty() {
None
} else {
Some(header_map)
}
};
let response_text = self
.text()
.await
.map_err(|_| crate::injector::core::InjectorError::HttpRequestFailed)?;
logger::debug!(
response_length = response_text.len(),
headers_count = headers.as_ref().map(|h| h.len()).unwrap_or(0),
"Processing connector response"
);
let response_data = match serde_json::from_str::<serde_json::Value>(&response_text) {
Ok(json) => json,
Err(_e) => serde_json::Value::String(response_text),
};
Ok(InjectorResponse {
status_code,
headers,
response: response_data,
})
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 41,
"total_crates": null
} |
fn_clm_injector_from_base64_header_-7496079818707784117 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/vault_metadata
// Inherent implementation for VaultMetadataFactory
/// Create a vault metadata processor from a base64-encoded header value, with comprehensive validation
pub fn from_base64_header(
base64_value: &str,
) -> Result<Box<dyn VaultMetadataProcessor>, VaultMetadataError> {
// Validate input
if base64_value.trim().is_empty() {
return Err(VaultMetadataError::EmptyOrMalformedHeader);
}
// Decode base64 with detailed error context
let decoded_bytes = BASE64_ENGINE.decode(base64_value.trim()).map_err(|e| {
logger::error!(
error = %e,
"Failed to decode base64 vault metadata header"
);
VaultMetadataError::Base64DecodingFailed(format!("Invalid base64 encoding: {e}"))
})?;
// Validate decoded size
if decoded_bytes.is_empty() {
return Err(VaultMetadataError::EmptyOrMalformedHeader);
}
if decoded_bytes.len() > 1_000_000 {
return Err(VaultMetadataError::JsonParsingFailed(
"Decoded vault metadata is too large (>1MB)".to_string(),
));
}
// Parse JSON with detailed error context
let metadata: ExternalVaultProxyMetadata =
serde_json::from_slice(&decoded_bytes).map_err(|e| {
logger::error!(
error = %e,
"Failed to parse vault metadata JSON"
);
VaultMetadataError::JsonParsingFailed(format!("Invalid JSON structure: {e}"))
})?;
logger::info!(
vault_connector = ?metadata.vault_connector(),
"Successfully parsed vault metadata from header"
);
Ok(Box::new(metadata))
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 52,
"total_crates": null
} |
fn_clm_injector_test_vault_metadata_processing_-7496079818707784117 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/vault_metadata
fn test_vault_metadata_processing() {
// Create test VGS metadata with base64 encoded certificate
let vgs_metadata = VgsMetadata {
proxy_url: "https://vgs-proxy.example.com:8443"
.parse()
.expect("Valid test URL"),
certificate: Secret::new("cert".to_string()),
};
let metadata = ExternalVaultProxyMetadata::VgsMetadata(vgs_metadata);
// Serialize and base64 encode (as it would come from the header)
let metadata_json =
serde_json::to_vec(&metadata).expect("Metadata serialization should succeed");
let base64_metadata = BASE64_ENGINE.encode(&metadata_json);
// Create headers with vault metadata
let mut headers = HashMap::new();
headers.insert(
"Content-Type".to_string(),
Secret::new("application/json".to_string()),
);
headers.insert(
"Authorization".to_string(),
Secret::new("Bearer token123".to_string()),
);
headers.insert(
EXTERNAL_VAULT_METADATA_HEADER.to_string(),
Secret::new(base64_metadata),
);
// Exercise the automatic vault metadata processing via the unified API
let injector_request = InjectorRequest::new(
"https://api.example.com/v1/payments".to_string(),
HttpMethod::POST,
"amount={{$amount}}¤cy={{$currency}}".to_string(),
TokenData {
vault_connector: VaultConnectors::VGS,
specific_token_data: SecretSerdeValue::new(serde_json::json!({
"amount": "1000",
"currency": "USD"
})),
},
Some(headers),
None, // No fallback proxy needed - vault metadata provides it
None, // No fallback client cert
None, // No fallback client key
None, // No fallback CA cert
);
// Verify vault metadata was automatically applied!
assert!(injector_request.connection_config.proxy_url.is_some());
assert!(injector_request.connection_config.ca_cert.is_some());
assert_eq!(
injector_request
.connection_config
.proxy_url
.as_ref()
.expect("Proxy URL should be set")
.clone()
.expose(),
"https://vgs-proxy.example.com:8443/"
);
// Verify vault metadata header was removed from regular headers
assert!(!injector_request
.connection_config
.headers
.contains_key(EXTERNAL_VAULT_METADATA_HEADER));
// Verify other headers are preserved
assert!(injector_request
.connection_config
.headers
.contains_key("Content-Type"));
assert!(injector_request
.connection_config
.headers
.contains_key("Authorization"));
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 44,
"total_crates": null
} |
fn_clm_injector_extract_and_apply_vault_metadata_-7496079818707784117 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/vault_metadata
// Implementation of VaultMetadataExtractor for ConnectionConfig
fn extract_and_apply_vault_metadata(
&mut self,
headers: &HashMap<String, Secret<String>>,
) -> Result<(), VaultMetadataError> {
if let Some(vault_metadata_header) = headers.get(EXTERNAL_VAULT_METADATA_HEADER) {
let processor =
VaultMetadataFactory::from_base64_header(&vault_metadata_header.clone().expose())
.map_err(|e| {
logger::error!(
error = %e,
"Failed to create vault metadata processor from header"
);
e
})?;
processor.process_metadata(self).map_err(|e| {
logger::error!(
error = %e,
vault_connector = ?processor.vault_connector(),
"Failed to apply vault metadata to connection config"
);
e
})?;
logger::info!(
vault_connector = ?processor.vault_connector(),
proxy_url_applied = self.proxy_url.is_some(),
ca_cert_applied = self.ca_cert.is_some(),
client_cert_applied = self.client_cert.is_some(),
"Successfully applied vault metadata to connection configuration"
);
}
Ok(())
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 25,
"total_crates": null
} |
fn_clm_injector_url_validation_failed_-7496079818707784117 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/vault_metadata
// Inherent implementation for VaultMetadataError
/// Create a URL validation error with context
pub fn url_validation_failed(field: &str, url: &str, reason: impl Into<String>) -> Self {
Self::UrlValidationFailed {
field: field.to_string(),
url: url.to_string(),
reason: reason.into(),
}
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 24,
"total_crates": null
} |
fn_clm_injector_extract_and_apply_vault_metadata_with_fallback_from_header_-7496079818707784117 | clm | function | // Repository: hyperswitch
// Crate: injector
// Module: crates/injector/src/vault_metadata
// Implementation of VaultMetadataExtractorExt for ConnectionConfig
fn extract_and_apply_vault_metadata_with_fallback_from_header(
&mut self,
header_value: &str,
) -> bool {
let mut temp_headers = HashMap::new();
temp_headers.insert(
EXTERNAL_VAULT_METADATA_HEADER.to_string(),
Secret::new(header_value.to_string()),
);
self.extract_and_apply_vault_metadata_with_fallback(&temp_headers)
}
| {
"crate": "injector",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 20,
"total_crates": null
} |
fn_clm_analytics_get_domain_info_8650457006571152904 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/core
pub async fn get_domain_info(
domain: AnalyticsDomain,
) -> crate::errors::AnalyticsResult<GetInfoResponse> {
let info = match domain {
AnalyticsDomain::Payments => GetInfoResponse {
metrics: utils::get_payment_metrics_info(),
download_dimensions: None,
dimensions: utils::get_payment_dimensions(),
},
AnalyticsDomain::PaymentIntents => GetInfoResponse {
metrics: utils::get_payment_intent_metrics_info(),
download_dimensions: None,
dimensions: utils::get_payment_intent_dimensions(),
},
AnalyticsDomain::Refunds => GetInfoResponse {
metrics: utils::get_refund_metrics_info(),
download_dimensions: None,
dimensions: utils::get_refund_dimensions(),
},
AnalyticsDomain::Frm => GetInfoResponse {
metrics: utils::get_frm_metrics_info(),
download_dimensions: None,
dimensions: utils::get_frm_dimensions(),
},
AnalyticsDomain::SdkEvents => GetInfoResponse {
metrics: utils::get_sdk_event_metrics_info(),
download_dimensions: None,
dimensions: utils::get_sdk_event_dimensions(),
},
AnalyticsDomain::AuthEvents => GetInfoResponse {
metrics: utils::get_auth_event_metrics_info(),
download_dimensions: None,
dimensions: utils::get_auth_event_dimensions(),
},
AnalyticsDomain::ApiEvents => GetInfoResponse {
metrics: utils::get_api_event_metrics_info(),
download_dimensions: None,
dimensions: utils::get_api_event_dimensions(),
},
AnalyticsDomain::Dispute => GetInfoResponse {
metrics: utils::get_dispute_metrics_info(),
download_dimensions: None,
dimensions: utils::get_dispute_dimensions(),
},
AnalyticsDomain::Routing => GetInfoResponse {
metrics: utils::get_payment_metrics_info(),
download_dimensions: None,
dimensions: utils::get_payment_dimensions(),
},
};
Ok(info)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 49,
"total_crates": null
} |
fn_clm_analytics_default_1573310292429270265 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sqlx
// Implementation of Default for SqlxClient
fn default() -> Self {
let database_url = format!(
"postgres://{}:{}@{}:{}/{}",
"db_user", "db_pass", "localhost", 5432, "hyperswitch_db"
);
Self {
#[allow(clippy::expect_used)]
pool: PgPoolOptions::new()
.connect_lazy(&database_url)
.expect("SQLX Pool Creation failed"),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7707,
"total_crates": null
} |
fn_clm_analytics_decode_1573310292429270265 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sqlx
// Implementation of Decode<'r, Postgres> for DBEnumWrapper<Type>
fn decode(
value: PgValueRef<'r>,
) -> Result<Self, Box<dyn std::error::Error + 'static + Send + Sync>> {
let str_value = <&'r str as Decode<'r, Postgres>>::decode(value)?;
Type::from_str(str_value).map(DBEnumWrapper).or(Err(format!(
"invalid value {:?} for enum {}",
str_value,
Type::name()
)
.into()))
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 321,
"total_crates": null
} |
fn_clm_analytics_to_sql_1573310292429270265 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sqlx
// Implementation of ToSql<SqlxClient> for Window<T>
fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result<String, ParsingError> {
Ok(match self {
Self::Sum {
field,
partition_by,
order_by,
alias,
} => {
format!(
"sum({}) over ({}{}){}",
field
.to_sql(table_engine)
.attach_printable("Failed to sum window")?,
partition_by.as_ref().map_or_else(
|| "".to_owned(),
|partition_by| format!("partition by {}", partition_by.to_owned())
),
order_by.as_ref().map_or_else(
|| "".to_owned(),
|(order_column, order)| format!(
" order by {} {}",
order_column.to_owned(),
order
)
),
alias.map_or_else(|| "".to_owned(), |alias| format!(" as {alias}"))
)
}
Self::RowNumber {
field: _,
partition_by,
order_by,
alias,
} => {
format!(
"row_number() over ({}{}){}",
partition_by.as_ref().map_or_else(
|| "".to_owned(),
|partition_by| format!("partition by {}", partition_by.to_owned())
),
order_by.as_ref().map_or_else(
|| "".to_owned(),
|(order_column, order)| format!(
" order by {} {}",
order_column.to_owned(),
order
)
),
alias.map_or_else(|| "".to_owned(), |alias| format!(" as {alias}"))
)
}
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 98,
"total_crates": null
} |
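To make the generated clause concrete, here is a standalone re-creation of the `Sum` arm's formatting, independent of the analytics crate (field, partition, and alias names are illustrative):

```rust
// Mirror of the window-clause formatting above, with plain &str inputs.
fn window_sum_sql(
    field: &str,
    partition_by: Option<&str>,
    order_by: Option<(&str, &str)>,
    alias: Option<&str>,
) -> String {
    format!(
        "sum({}) over ({}{}){}",
        field,
        partition_by.map_or_else(String::new, |p| format!("partition by {p}")),
        order_by.map_or_else(String::new, |(col, ord)| format!(" order by {col} {ord}")),
        alias.map_or_else(String::new, |a| format!(" as {a}")),
    )
}

fn main() {
    assert_eq!(
        window_sum_sql("amount", Some("currency"), Some(("created_at", "asc")), Some("total")),
        "sum(amount) over (partition by currency order by created_at asc) as total"
    );
}
```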
fn_clm_analytics_from_row_1573310292429270265 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sqlx
// Implementation of FromRow<'a, PgRow> for super::disputes::metrics::DisputeMetricRow
fn from_row(row: &'a PgRow) -> sqlx::Result<Self> {
let dispute_stage: Option<DBEnumWrapper<DisputeStage>> =
row.try_get("dispute_stage").or_else(|e| match e {
ColumnNotFound(_) => Ok(Default::default()),
e => Err(e),
})?;
let dispute_status: Option<DBEnumWrapper<DisputeStatus>> =
row.try_get("dispute_status").or_else(|e| match e {
ColumnNotFound(_) => Ok(Default::default()),
e => Err(e),
})?;
let connector: Option<String> = row.try_get("connector").or_else(|e| match e {
ColumnNotFound(_) => Ok(Default::default()),
e => Err(e),
})?;
let currency: Option<DBEnumWrapper<Currency>> =
row.try_get("currency").or_else(|e| match e {
ColumnNotFound(_) => Ok(Default::default()),
e => Err(e),
})?;
let total: Option<bigdecimal::BigDecimal> = row.try_get("total").or_else(|e| match e {
ColumnNotFound(_) => Ok(Default::default()),
e => Err(e),
})?;
let count: Option<i64> = row.try_get("count").or_else(|e| match e {
ColumnNotFound(_) => Ok(Default::default()),
e => Err(e),
})?;
// Remove millisecond precision to get accurate diffs against ClickHouse
let start_bucket: Option<PrimitiveDateTime> = row
.try_get::<Option<PrimitiveDateTime>, _>("start_bucket")?
.and_then(|dt| dt.replace_millisecond(0).ok());
let end_bucket: Option<PrimitiveDateTime> = row
.try_get::<Option<PrimitiveDateTime>, _>("end_bucket")?
.and_then(|dt| dt.replace_millisecond(0).ok());
Ok(Self {
dispute_stage,
dispute_status,
connector,
currency,
total,
count,
start_bucket,
end_bucket,
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 59,
"total_crates": null
} |
fn_clm_analytics_from_conf_1573310292429270265 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/sqlx
// Inherent implementation for SqlxClient
pub async fn from_conf(conf: &Database, schema: &str) -> Self {
let database_url = conf.get_database_url(schema);
#[allow(clippy::expect_used)]
let pool = PgPoolOptions::new()
.max_connections(conf.pool_size)
.acquire_timeout(std::time::Duration::from_secs(conf.connection_timeout))
.connect_lazy(&database_url)
.expect("SQLX Pool Creation failed");
Self { pool }
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 45,
"total_crates": null
} |
fn_clm_analytics_default_-1563023424954240744 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/types
// Implementation of Default for DBEnumWrapper<T>
fn default() -> Self {
Self(T::default())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7707,
"total_crates": null
} |
fn_clm_analytics_as_ref_-1563023424954240744 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/types
// Implementation of AsRef<T> for DBEnumWrapper<T>
fn as_ref(&self) -> &T {
&self.0
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7553,
"total_crates": null
} |
fn_clm_analytics_switch_-1563023424954240744 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/types
// Implementation of ErrorSwitch<AnalyticsError> for FiltersError
fn switch(&self) -> AnalyticsError {
match self {
Self::QueryBuildingError | Self::QueryExecutionFailure => AnalyticsError::UnknownError,
Self::NotImplemented(a) => AnalyticsError::NotImplemented(a),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3082,
"total_crates": null
} |
fn_clm_analytics_from_str_-1563023424954240744 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/types
// Implementation of FromStr for DBEnumWrapper<T>
fn from_str(s: &str) -> Result<Self, Self::Err> {
T::from_str(s)
.map_err(|_er| report!(ParsingError::EnumParseFailure(std::any::type_name::<T>())))
.map(DBEnumWrapper)
.attach_printable_lazy(|| format!("raw_value: {s}"))
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 787,
"total_crates": null
} |
fn_clm_analytics_get_table_engine_-1563023424954240744 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/types
fn get_table_engine(_table: AnalyticsCollection) -> TableEngine {
TableEngine::BasicTree
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3,
"total_crates": null
} |
fn_clm_analytics_invoke_lambda_5670610063963438620 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lambda_utils
pub async fn invoke_lambda(
function_name: &str,
region: &str,
json_bytes: &[u8],
) -> CustomResult<(), AnalyticsError> {
get_aws_client(region.to_string())
.await
.invoke()
.function_name(function_name)
.invocation_type(Event)
.payload(Blob::new(json_bytes.to_owned()))
.send()
.await
.map_err(|er| {
let er_rep = format!("{er:?}");
report!(er).attach_printable(er_rep)
})
.change_context(AnalyticsError::UnknownError)
.attach_printable("Lambda invocation failed")?;
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 72,
"total_crates": null
} |
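A call-site sketch (the public path `analytics::lambda_utils::invoke_lambda`, the function name, and the region are assumptions; `tokio` and `serde_json` are assumed dependencies). Note the `Event` invocation type above means fire-and-forget: the call returns once Lambda accepts the payload, not when the function finishes.

```rust
// Sketch: invoke a Lambda asynchronously with a JSON payload.
use analytics::lambda_utils::invoke_lambda; // assumed public path

#[tokio::main]
async fn main() {
    let payload = serde_json::json!({ "report": "weekly" }); // hypothetical payload
    let bytes = serde_json::to_vec(&payload).expect("payload serializes");
    invoke_lambda("hyperswitch-report-gen", "us-east-1", &bytes)
        .await
        .expect("lambda invocation accepted");
}
```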
fn_clm_analytics_get_aws_client_5670610063963438620 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lambda_utils
async fn get_aws_client(region: String) -> Client {
let region_provider = RegionProviderChain::first_try(Region::new(region));
let sdk_config = aws_config::from_env().region(region_provider).load().await;
Client::new(&sdk_config)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 15,
"total_crates": null
} |
fn_clm_analytics_default_-7923696972310479287 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/clickhouse
// Implementation of Default for ClickhouseConfig
fn default() -> Self {
Self {
username: "default".to_string(),
password: None,
host: "http://localhost:8123".to_string(),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7707,
"total_crates": null
} |
fn_clm_analytics_try_into_-7923696972310479287 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/clickhouse
// Implementation of TryInto<ActivePaymentsMetricRow> for serde_json::Value
fn try_into(self) -> Result<ActivePaymentsMetricRow, Self::Error> {
serde_json::from_value(self).change_context(ParsingError::StructParseFailure(
"Failed to parse ActivePaymentsMetricRow in clickhouse results",
))
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 173,
"total_crates": null
} |
fn_clm_analytics_to_sql_-7923696972310479287 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/clickhouse
// Implementation of ToSql<ClickhouseClient> for Window<T>
fn to_sql(&self, table_engine: &TableEngine) -> error_stack::Result<String, ParsingError> {
Ok(match self {
Self::Sum {
field,
partition_by,
order_by,
alias,
} => {
format!(
"sum({}) over ({}{}){}",
field
.to_sql(table_engine)
.attach_printable("Failed to sum window")?,
partition_by.as_ref().map_or_else(
|| "".to_owned(),
|partition_by| format!("partition by {}", partition_by.to_owned())
),
order_by.as_ref().map_or_else(
|| "".to_owned(),
|(order_column, order)| format!(
" order by {} {}",
order_column.to_owned(),
order
)
),
alias.map_or_else(|| "".to_owned(), |alias| format!(" as {alias}"))
)
}
Self::RowNumber {
field: _,
partition_by,
order_by,
alias,
} => {
format!(
"row_number() over ({}{}){}",
partition_by.as_ref().map_or_else(
|| "".to_owned(),
|partition_by| format!("partition by {}", partition_by.to_owned())
),
order_by.as_ref().map_or_else(
|| "".to_owned(),
|(order_column, order)| format!(
" order by {} {}",
order_column.to_owned(),
order
)
),
alias.map_or_else(|| "".to_owned(), |alias| format!(" as {alias}"))
)
}
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 98,
"total_crates": null
} |
fn_clm_analytics_execute_query_-7923696972310479287 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/clickhouse
// Inherent implementation for ClickhouseClient
async fn execute_query(&self, query: &str) -> ClickhouseResult<Vec<serde_json::Value>> {
logger::debug!("Executing query: {query}");
let client = reqwest::Client::new();
let params = CkhQuery {
date_time_output_format: String::from("iso"),
output_format_json_quote_64bit_integers: 0,
database: self.database.clone(),
};
let response = client
.post(&self.config.host)
.query(&params)
.basic_auth(self.config.username.clone(), self.config.password.clone())
.body(format!("{query}\nFORMAT JSON"))
.send()
.await
.change_context(ClickhouseError::ConnectionError)?;
logger::debug!(clickhouse_response=?response, query=?query, "Clickhouse response");
if response.status() != StatusCode::OK {
response.text().await.map_or_else(
|er| {
Err(ClickhouseError::ResponseError)
.attach_printable_lazy(|| format!("Error: {er:?}"))
},
|t| Err(report!(ClickhouseError::ResponseNotOK(t))),
)
} else {
Ok(response
.json::<CkhOutput<serde_json::Value>>()
.await
.change_context(ClickhouseError::ResponseError)?
.data)
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 49,
"total_crates": null
} |
fn_clm_analytics_deep_health_check_-7923696972310479287 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/clickhouse
// Implementation of HealthCheck for ClickhouseClient
async fn deep_health_check(
&self,
) -> common_utils::errors::CustomResult<(), QueryExecutionError> {
self.execute_query("SELECT 1")
.await
.map(|_| ())
.change_context(QueryExecutionError::DatabaseError)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 35,
"total_crates": null
} |
fn_clm_analytics_default_-4140845485991118937 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lib
// Implementation of Default for AnalyticsConfig
fn default() -> Self {
Self::Sqlx {
sqlx: Database::default(),
forex_enabled: false,
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7705,
"total_crates": null
} |
fn_clm_analytics_from_conf_-4140845485991118937 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lib
// Inherent implementation for AnalyticsProvider
pub async fn from_conf(config: &AnalyticsConfig, tenant: &dyn TenantConfig) -> Self {
match config {
AnalyticsConfig::Sqlx { sqlx, .. } => {
Self::Sqlx(SqlxClient::from_conf(sqlx, tenant.get_schema()).await)
}
AnalyticsConfig::Clickhouse { clickhouse, .. } => Self::Clickhouse(ClickhouseClient {
config: Arc::new(clickhouse.clone()),
database: tenant.get_clickhouse_database().to_string(),
}),
AnalyticsConfig::CombinedCkh {
sqlx, clickhouse, ..
} => Self::CombinedCkh(
SqlxClient::from_conf(sqlx, tenant.get_schema()).await,
ClickhouseClient {
config: Arc::new(clickhouse.clone()),
database: tenant.get_clickhouse_database().to_string(),
},
),
AnalyticsConfig::CombinedSqlx {
sqlx, clickhouse, ..
} => Self::CombinedSqlx(
SqlxClient::from_conf(sqlx, tenant.get_schema()).await,
ClickhouseClient {
config: Arc::new(clickhouse.clone()),
database: tenant.get_clickhouse_database().to_string(),
},
),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 77,
"total_crates": null
} |
fn_clm_analytics_convert_to_raw_secret_-4140845485991118937 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lib
// Implementation of SecretsHandler for AnalyticsConfig
async fn convert_to_raw_secret(
value: SecretStateContainer<Self, SecuredSecret>,
secret_management_client: &dyn SecretManagementInterface,
) -> CustomResult<SecretStateContainer<Self, RawSecret>, SecretsManagementError> {
let analytics_config = value.get_inner();
let decrypted_password = match analytics_config {
// TODO: Perform KMS decryption of the ClickHouse password
Self::Clickhouse { .. } => masking::Secret::new(String::default()),
Self::Sqlx { sqlx, .. }
| Self::CombinedCkh { sqlx, .. }
| Self::CombinedSqlx { sqlx, .. } => {
secret_management_client
.get_secret(sqlx.password.clone())
.await?
}
};
Ok(value.transition_state(|conf| match conf {
Self::Sqlx {
sqlx,
forex_enabled,
} => Self::Sqlx {
sqlx: Database {
password: decrypted_password,
..sqlx
},
forex_enabled,
},
Self::Clickhouse {
clickhouse,
forex_enabled,
} => Self::Clickhouse {
clickhouse,
forex_enabled,
},
Self::CombinedCkh {
sqlx,
clickhouse,
forex_enabled,
} => Self::CombinedCkh {
sqlx: Database {
password: decrypted_password,
..sqlx
},
clickhouse,
forex_enabled,
},
Self::CombinedSqlx {
sqlx,
clickhouse,
forex_enabled,
} => Self::CombinedSqlx {
sqlx: Database {
password: decrypted_password,
..sqlx
},
clickhouse,
forex_enabled,
},
}))
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 74,
"total_crates": null
} |
fn_clm_analytics_fmt_-4140845485991118937 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lib
// Implementation of std::fmt::Display for AnalyticsProvider
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let analytics_provider = match self {
Self::Clickhouse(_) => "Clickhouse",
Self::Sqlx(_) => "Sqlx",
Self::CombinedCkh(_, _) => "CombinedCkh",
Self::CombinedSqlx(_, _) => "CombinedSqlx",
};
write!(f, "{analytics_provider}")
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 38,
"total_crates": null
} |
fn_clm_analytics_get_payment_metrics_-4140845485991118937 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/lib
// Inherent implementation for AnalyticsProvider
pub async fn get_payment_metrics(
&self,
metric: &PaymentMetrics,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
) -> types::MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
// Metrics to get the fetch time for each payment metric
metrics::request::record_operation_time(
async {
match self {
Self::Sqlx(pool) => {
metric
.load_metrics(
dimensions,
auth,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::Clickhouse(pool) => {
metric
.load_metrics(
dimensions,
auth,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::CombinedCkh(sqlx_pool, ckh_pool) => {
let (ckh_result, sqlx_result) = tokio::join!(metric
.load_metrics(
dimensions,
auth,
filters,
granularity,
time_range,
ckh_pool,
),
metric
.load_metrics(
dimensions,
auth,
filters,
granularity,
time_range,
sqlx_pool,
));
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics metrics")
},
_ => {}
};
ckh_result
}
Self::CombinedSqlx(sqlx_pool, ckh_pool) => {
let (ckh_result, sqlx_result) = tokio::join!(metric
.load_metrics(
dimensions,
auth,
filters,
granularity,
time_range,
ckh_pool,
),
metric
.load_metrics(
dimensions,
auth,
filters,
granularity,
time_range,
sqlx_pool,
));
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics metrics")
},
_ => {}
};
sqlx_result
}
}
},
&metrics::METRIC_FETCH_TIME,
metric,
self,
)
.await
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 27,
"total_crates": null
} |
fn_clm_analytics_new_-2546719242055680872 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/query
// Inherent implementation for QueryBuilder<T>
pub fn new(table: AnalyticsCollection) -> Self {
Self {
columns: Default::default(),
filters: Default::default(),
group_by: Default::default(),
order_by: Default::default(),
having: Default::default(),
limit_by: Default::default(),
outer_select: Default::default(),
top_n: Default::default(),
table,
distinct: Default::default(),
db_type: Default::default(),
table_engine: T::get_table_engine(table),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14485,
"total_crates": null
} |
fn_clm_analytics_default_-2546719242055680872 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/query
// Implementation of Default for Filter
fn default() -> Self {
Self::NestedFilter(FilterCombinator::default(), Vec::new())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7709,
"total_crates": null
} |
fn_clm_analytics_add_select_column_-2546719242055680872 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/query
// Inherent implementation for QueryBuilder<T>
pub fn add_select_column(&mut self, column: impl ToSql<T>) -> QueryResult<()> {
self.columns.push(
column
.to_sql(&self.table_engine)
.change_context(QueryBuildingError::SqlSerializeError)
.attach_printable("Error serializing select column")?,
);
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1052,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_-2546719242055680872 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/query
// Implementation of AuthInfo for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
match self {
Self::OrgLevel { org_id } => {
builder
.add_filter_clause("organization_id", org_id)
.attach_printable("Error adding organization_id filter")?;
}
Self::MerchantLevel {
org_id,
merchant_ids,
} => {
builder
.add_filter_clause("organization_id", org_id)
.attach_printable("Error adding organization_id filter")?;
builder
.add_filter_in_range_clause("merchant_id", merchant_ids)
.attach_printable("Error adding merchant_id filter")?;
}
Self::ProfileLevel {
org_id,
merchant_id,
profile_ids,
} => {
builder
.add_filter_clause("organization_id", org_id)
.attach_printable("Error adding organization_id filter")?;
builder
.add_filter_clause("merchant_id", merchant_id)
.attach_printable("Error adding merchant_id filter")?;
builder
.add_filter_in_range_clause("profile_id", profile_ids)
.attach_printable("Error adding profile_id filter")?;
}
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 722,
"total_crates": null
} |
fn_clm_analytics_add_filter_clause_-2546719242055680872 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/query
// Inherent implementation for QueryBuilder<T>
pub fn add_filter_clause(
&mut self,
key: impl ToSql<T>,
value: impl ToSql<T>,
) -> QueryResult<()> {
self.add_custom_filter_clause(key, value, FilterTypes::Equal)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 356,
"total_crates": null
} |
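The three QueryBuilder entries above compose into a small fluent flow. A hedged sketch, not repository code: the trait bounds are abbreviated (further ToSql<T> impls are assumed to exist, as the crate's own query functions rely on them), and the table variant and column strings mirror calls that appear in other entries in this document:

fn build_example<T>() -> QueryResult<QueryBuilder<T>>
where
    T: AnalyticsDataSource,
    AnalyticsCollection: ToSql<T>,
{
    let mut builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
    builder.add_select_column("*")?; // SELECT *
    builder.add_filter_clause("merchant_id", "merchant_123")?; // hypothetical value
    Ok(builder)
}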
fn_clm_analytics_new_-1819204752297367932 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/opensearch
// Inherent implementation for OpenSearchQueryBuilder
pub fn new(query_type: OpenSearchQuery, query: String, search_params: Vec<AuthInfo>) -> Self {
Self {
query_type,
query,
search_params,
offset: Default::default(),
count: Default::default(),
filters: Default::default(),
time_range: Default::default(),
case_sensitive_fields: HashSet::from([
"customer_email.keyword",
"search_tags.keyword",
"card_last_4.keyword",
"payment_id.keyword",
"amount",
"customer_id.keyword",
]),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14473,
"total_crates": null
} |
fn_clm_analytics_default_-1819204752297367932 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/opensearch
// Implementation of OpenSearchConfig for Default
fn default() -> Self {
Self {
host: "https://localhost:9200".to_string(),
auth: OpenSearchAuth::Basic {
username: "admin".to_string(),
password: "admin".to_string(),
},
indexes: OpenSearchIndexes {
payment_attempts: "hyperswitch-payment-attempt-events".to_string(),
payment_intents: "hyperswitch-payment-intent-events".to_string(),
refunds: "hyperswitch-refund-events".to_string(),
disputes: "hyperswitch-dispute-events".to_string(),
sessionizer_payment_attempts: "sessionizer-payment-attempt-events".to_string(),
sessionizer_payment_intents: "sessionizer-payment-intent-events".to_string(),
sessionizer_refunds: "sessionizer-refund-events".to_string(),
sessionizer_disputes: "sessionizer-dispute-events".to_string(),
},
enabled: false,
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7725,
"total_crates": null
} |
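A small sketch of overriding those defaults via struct-update syntax; this assumes the config struct's fields are publicly constructible (which the Default impl suggests but does not guarantee), and the host value is hypothetical:

let config = OpenSearchConfig {
    host: "https://opensearch.internal:9200".to_string(), // hypothetical host
    enabled: true,
    ..OpenSearchConfig::default()
};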
fn_clm_analytics_switch_-1819204752297367932 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/opensearch
// Implementation of OpenSearchError for ErrorSwitch<ApiErrorResponse>
fn switch(&self) -> ApiErrorResponse {
match self {
Self::ConnectionError => ApiErrorResponse::InternalServerError(ApiError::new(
"IR",
0,
"Connection error",
None,
)),
Self::BadRequestError(response) => {
ApiErrorResponse::BadRequest(ApiError::new("IR", 1, response.to_string(), None))
}
Self::ResponseNotOK(response) => ApiErrorResponse::InternalServerError(ApiError::new(
"IR",
1,
format!("Something went wrong {response}"),
None,
)),
Self::ResponseError => ApiErrorResponse::InternalServerError(ApiError::new(
"IR",
2,
"Something went wrong",
None,
)),
Self::QueryBuildingError => ApiErrorResponse::InternalServerError(ApiError::new(
"IR",
3,
"Query building error",
None,
)),
Self::DeserialisationError => ApiErrorResponse::InternalServerError(ApiError::new(
"IR",
4,
"Deserialisation error",
None,
)),
Self::IndexAccessNotPermittedError(index) => {
ApiErrorResponse::ForbiddenCommonResource(ApiError::new(
"IR",
5,
format!("Index access not permitted: {index:?}"),
None,
))
}
Self::UnknownError => {
ApiErrorResponse::InternalServerError(ApiError::new("IR", 6, "Unknown error", None))
}
Self::AccessForbiddenError => ApiErrorResponse::ForbiddenCommonResource(ApiError::new(
"IR",
7,
"Access Forbidden error",
None,
)),
Self::NotEnabled => ApiErrorResponse::InternalServerError(ApiError::new(
"IR",
8,
"Opensearch is not enabled",
None,
)),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3122,
"total_crates": null
} |
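Usage is a single conversion at the API boundary; a minimal sketch:

let api_error: ApiErrorResponse = OpenSearchError::NotEnabled.switch();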
fn_clm_analytics_from_-1819204752297367932 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/opensearch
// Implementation of OpensearchTimeRange for From<TimeRange>
fn from(time_range: TimeRange) -> Self {
Self {
gte: time_range.start_time,
lte: time_range.end_time,
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 2600,
"total_crates": null
} |
fn_clm_analytics_add_filter_clause_-1819204752297367932 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/opensearch
// Inherent implementation for OpenSearchQueryBuilder
pub fn add_filter_clause(&mut self, lhs: String, rhs: Vec<Value>) -> QueryResult<()> {
self.filters.push((lhs, rhs));
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 356,
"total_crates": null
} |
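A hedged sketch combining the two opensearch builder entries above, written as a fragment inside a function that can propagate QueryResult errors; the OpenSearchQuery::Search variant, the .keyword field name, and the Value construction mirror the search functions later in this document, while index and search_params are placeholders:

let mut qb = OpenSearchQueryBuilder::new(
    OpenSearchQuery::Search(index),      // placeholder index
    "card declined".to_string(),         // free-text query
    search_params,                       // Vec<AuthInfo> scoping the search
);
qb.add_filter_clause(
    "currency.keyword".to_string(),
    vec![serde_json::Value::String("USD".to_string())],
)?;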
fn_clm_analytics_switch_-1072737800367853787 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/errors
// Implementation of AnalyticsError for ErrorSwitch<ApiErrorResponse>
fn switch(&self) -> ApiErrorResponse {
match self {
Self::NotImplemented(feature) => ApiErrorResponse::NotImplemented(ApiError::new(
"IR",
0,
format!("{feature} is not implemented."),
None,
)),
Self::UnknownError => ApiErrorResponse::InternalServerError(ApiError::new(
"HE",
0,
"Something went wrong",
None,
)),
Self::AccessForbiddenError => {
ApiErrorResponse::Unauthorized(ApiError::new("IR", 0, "Access Forbidden", None))
}
Self::ForexFetchFailed => ApiErrorResponse::InternalServerError(ApiError::new(
"HE",
0,
"Failed to fetch currency exchange rate",
None,
)),
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3096,
"total_crates": null
} |
fn_clm_analytics_msearch_results_-500375423886643255 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/search
pub async fn msearch_results(
client: &OpenSearchClient,
req: GetGlobalSearchRequest,
search_params: Vec<AuthInfo>,
indexes: Vec<SearchIndex>,
) -> CustomResult<Vec<GetSearchResponse>, OpenSearchError> {
if req.query.trim().is_empty()
&& req
.filters
.as_ref()
.is_none_or(|filters| filters.is_all_none())
{
return Err(OpenSearchError::BadRequestError(
"Both query and filters are empty".to_string(),
)
.into());
}
let mut query_builder = OpenSearchQueryBuilder::new(
OpenSearchQuery::Msearch(indexes.clone()),
req.query,
search_params,
);
if let Some(filters) = req.filters {
if let Some(currency) = filters.currency {
if !currency.is_empty() {
query_builder
.add_filter_clause("currency.keyword".to_string(), convert_to_value(currency))
.switch()?;
}
};
if let Some(status) = filters.status {
if !status.is_empty() {
query_builder
.add_filter_clause("status.keyword".to_string(), convert_to_value(status))
.switch()?;
}
};
if let Some(payment_method) = filters.payment_method {
if !payment_method.is_empty() {
query_builder
.add_filter_clause(
"payment_method.keyword".to_string(),
convert_to_value(payment_method),
)
.switch()?;
}
};
if let Some(customer_email) = filters.customer_email {
if !customer_email.is_empty() {
query_builder
.add_filter_clause(
"customer_email.keyword".to_string(),
convert_to_value(
customer_email
.iter()
.filter_map(|email| {
// TODO: Add trait based inputs instead of converting this to strings
serde_json::to_value(email)
.ok()
.and_then(|a| a.as_str().map(|a| a.to_string()))
})
.collect(),
),
)
.switch()?;
}
};
if let Some(search_tags) = filters.search_tags {
if !search_tags.is_empty() {
query_builder
.add_filter_clause(
"feature_metadata.search_tags.keyword".to_string(),
convert_to_value(
search_tags
.iter()
.filter_map(|search_tag| {
// TODO: Add trait based inputs instead of converting this to strings
serde_json::to_value(search_tag)
.ok()
.and_then(|a| a.as_str().map(|a| a.to_string()))
})
.collect(),
),
)
.switch()?;
}
};
if let Some(connector) = filters.connector {
if !connector.is_empty() {
query_builder
.add_filter_clause("connector.keyword".to_string(), convert_to_value(connector))
.switch()?;
}
};
if let Some(payment_method_type) = filters.payment_method_type {
if !payment_method_type.is_empty() {
query_builder
.add_filter_clause(
"payment_method_type.keyword".to_string(),
convert_to_value(payment_method_type),
)
.switch()?;
}
};
if let Some(card_network) = filters.card_network {
if !card_network.is_empty() {
query_builder
.add_filter_clause(
"card_network.keyword".to_string(),
convert_to_value(card_network),
)
.switch()?;
}
};
if let Some(card_last_4) = filters.card_last_4 {
if !card_last_4.is_empty() {
query_builder
.add_filter_clause(
"card_last_4.keyword".to_string(),
convert_to_value(card_last_4),
)
.switch()?;
}
};
if let Some(payment_id) = filters.payment_id {
if !payment_id.is_empty() {
query_builder
.add_filter_clause(
"payment_id.keyword".to_string(),
convert_to_value(payment_id),
)
.switch()?;
}
};
if let Some(amount) = filters.amount {
if !amount.is_empty() {
query_builder
.add_filter_clause("amount".to_string(), convert_to_value(amount))
.switch()?;
}
};
if let Some(customer_id) = filters.customer_id {
if !customer_id.is_empty() {
query_builder
.add_filter_clause(
"customer_id.keyword".to_string(),
convert_to_value(customer_id),
)
.switch()?;
}
};
};
if let Some(time_range) = req.time_range {
query_builder.set_time_range(time_range.into()).switch()?;
};
    let response_body: OpenMsearchOutput = client
        .execute(query_builder)
        .await
        .change_context(OpenSearchError::ConnectionError)?
        .text()
        .await
        .change_context(OpenSearchError::ResponseError)
        .and_then(|body: String| {
            serde_json::from_str::<OpenMsearchOutput>(&body)
                .change_context(OpenSearchError::DeserialisationError)
                .attach_printable(body.clone())
        })?;
Ok(response_body
.responses
.into_iter()
.zip(indexes)
.map(|(index_hit, index)| match index_hit {
OpensearchOutput::Success(success) => GetSearchResponse {
count: success.hits.total.value,
index,
hits: success
.hits
.hits
.into_iter()
.map(|hit| hit.source)
.collect(),
status: SearchStatus::Success,
},
OpensearchOutput::Error(error) => {
tracing::error!(
index = ?index,
error_response = ?error,
"Search error"
);
GetSearchResponse {
count: 0,
index,
hits: Vec::new(),
status: SearchStatus::Failure,
}
}
})
.collect())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 229,
"total_crates": null
} |
fn_clm_analytics_search_results_-500375423886643255 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/search
pub async fn search_results(
client: &OpenSearchClient,
req: GetSearchRequestWithIndex,
search_params: Vec<AuthInfo>,
) -> CustomResult<GetSearchResponse, OpenSearchError> {
let search_req = req.search_req;
if search_req.query.trim().is_empty()
&& search_req
.filters
.as_ref()
.is_none_or(|filters| filters.is_all_none())
{
return Err(OpenSearchError::BadRequestError(
"Both query and filters are empty".to_string(),
)
.into());
}
let mut query_builder = OpenSearchQueryBuilder::new(
OpenSearchQuery::Search(req.index),
search_req.query,
search_params,
);
if let Some(filters) = search_req.filters {
if let Some(currency) = filters.currency {
if !currency.is_empty() {
query_builder
.add_filter_clause("currency.keyword".to_string(), convert_to_value(currency))
.switch()?;
}
};
if let Some(status) = filters.status {
if !status.is_empty() {
query_builder
.add_filter_clause("status.keyword".to_string(), convert_to_value(status))
.switch()?;
}
};
if let Some(payment_method) = filters.payment_method {
if !payment_method.is_empty() {
query_builder
.add_filter_clause(
"payment_method.keyword".to_string(),
convert_to_value(payment_method),
)
.switch()?;
}
};
if let Some(customer_email) = filters.customer_email {
if !customer_email.is_empty() {
query_builder
.add_filter_clause(
"customer_email.keyword".to_string(),
convert_to_value(
customer_email
.iter()
.filter_map(|email| {
// TODO: Add trait based inputs instead of converting this to strings
serde_json::to_value(email)
.ok()
.and_then(|a| a.as_str().map(|a| a.to_string()))
})
.collect(),
),
)
.switch()?;
}
};
if let Some(search_tags) = filters.search_tags {
if !search_tags.is_empty() {
query_builder
.add_filter_clause(
"feature_metadata.search_tags.keyword".to_string(),
convert_to_value(
search_tags
.iter()
.filter_map(|search_tag| {
// TODO: Add trait based inputs instead of converting this to strings
serde_json::to_value(search_tag)
.ok()
.and_then(|a| a.as_str().map(|a| a.to_string()))
})
.collect(),
),
)
.switch()?;
}
};
if let Some(connector) = filters.connector {
if !connector.is_empty() {
query_builder
.add_filter_clause("connector.keyword".to_string(), convert_to_value(connector))
.switch()?;
}
};
if let Some(payment_method_type) = filters.payment_method_type {
if !payment_method_type.is_empty() {
query_builder
.add_filter_clause(
"payment_method_type.keyword".to_string(),
convert_to_value(payment_method_type),
)
.switch()?;
}
};
if let Some(card_network) = filters.card_network {
if !card_network.is_empty() {
query_builder
.add_filter_clause(
"card_network.keyword".to_string(),
convert_to_value(card_network),
)
.switch()?;
}
};
if let Some(card_last_4) = filters.card_last_4 {
if !card_last_4.is_empty() {
query_builder
.add_filter_clause(
"card_last_4.keyword".to_string(),
convert_to_value(card_last_4),
)
.switch()?;
}
};
if let Some(payment_id) = filters.payment_id {
if !payment_id.is_empty() {
query_builder
.add_filter_clause(
"payment_id.keyword".to_string(),
convert_to_value(payment_id),
)
.switch()?;
}
};
if let Some(amount) = filters.amount {
if !amount.is_empty() {
query_builder
.add_filter_clause("amount".to_string(), convert_to_value(amount))
.switch()?;
}
};
if let Some(customer_id) = filters.customer_id {
if !customer_id.is_empty() {
query_builder
.add_filter_clause(
"customer_id.keyword".to_string(),
convert_to_value(customer_id),
)
.switch()?;
}
};
};
if let Some(time_range) = search_req.time_range {
query_builder.set_time_range(time_range.into()).switch()?;
};
query_builder
.set_offset_n_count(search_req.offset, search_req.count)
.switch()?;
    let response_body: OpensearchOutput = client
        .execute(query_builder)
        .await
        .change_context(OpenSearchError::ConnectionError)?
        .text()
        .await
        .change_context(OpenSearchError::ResponseError)
        .and_then(|body: String| {
            serde_json::from_str::<OpensearchOutput>(&body)
                .change_context(OpenSearchError::DeserialisationError)
                .attach_printable(body.clone())
        })?;
match response_body {
OpensearchOutput::Success(success) => Ok(GetSearchResponse {
count: success.hits.total.value,
index: req.index,
hits: success
.hits
.hits
.into_iter()
.map(|hit| hit.source)
.collect(),
status: SearchStatus::Success,
}),
OpensearchOutput::Error(error) => {
tracing::error!(
index = ?req.index,
error_response = ?error,
"Search error"
);
Ok(GetSearchResponse {
count: 0,
index: req.index,
hits: Vec::new(),
status: SearchStatus::Failure,
})
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 223,
"total_crates": null
} |
fn_clm_analytics_convert_to_value_-500375423886643255 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/search
pub fn convert_to_value<T: Into<Value>>(items: Vec<T>) -> Vec<Value> {
items.into_iter().map(|item| item.into()).collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 90,
"total_crates": null
} |
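Since serde_json::Value implements From<&str>, the helper lifts a list of string literals directly; for example:

let values = convert_to_value(vec!["USD", "EUR"]);
// values == vec![Value::String("USD".into()), Value::String("EUR".into())]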
fn_clm_analytics_get_payment_dimensions_830339345091257740 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/utils
pub fn get_payment_dimensions() -> Vec<NameDescription> {
vec![
PaymentDimensions::Connector,
PaymentDimensions::PaymentMethod,
PaymentDimensions::PaymentMethodType,
PaymentDimensions::Currency,
PaymentDimensions::AuthType,
PaymentDimensions::PaymentStatus,
PaymentDimensions::ClientSource,
PaymentDimensions::ClientVersion,
PaymentDimensions::ProfileId,
PaymentDimensions::CardNetwork,
PaymentDimensions::MerchantId,
PaymentDimensions::RoutingApproach,
]
.into_iter()
.map(Into::into)
.collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_analytics_get_payment_metrics_info_830339345091257740 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/utils
pub fn get_payment_metrics_info() -> Vec<NameDescription> {
PaymentMetrics::iter().map(Into::into).collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_analytics_get_payment_intent_dimensions_830339345091257740 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/utils
pub fn get_payment_intent_dimensions() -> Vec<NameDescription> {
vec![
PaymentIntentDimensions::PaymentIntentStatus,
PaymentIntentDimensions::Currency,
PaymentIntentDimensions::ProfileId,
PaymentIntentDimensions::Connector,
PaymentIntentDimensions::AuthType,
PaymentIntentDimensions::PaymentMethod,
PaymentIntentDimensions::PaymentMethodType,
PaymentIntentDimensions::CardNetwork,
PaymentIntentDimensions::MerchantId,
]
.into_iter()
.map(Into::into)
.collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 19,
"total_crates": null
} |
fn_clm_analytics_get_auth_event_dimensions_830339345091257740 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/utils
pub fn get_auth_event_dimensions() -> Vec<NameDescription> {
vec![
AuthEventDimensions::AuthenticationConnector,
AuthEventDimensions::MessageVersion,
AuthEventDimensions::AcsReferenceNumber,
AuthEventDimensions::Platform,
AuthEventDimensions::Mcc,
AuthEventDimensions::Currency,
AuthEventDimensions::MerchantCountry,
AuthEventDimensions::BillingCountry,
AuthEventDimensions::ShippingCountry,
AuthEventDimensions::IssuerCountry,
AuthEventDimensions::IssuerId,
AuthEventDimensions::EarliestSupportedVersion,
AuthEventDimensions::LatestSupportedVersion,
AuthEventDimensions::WhitelistDecision,
AuthEventDimensions::DeviceManufacturer,
AuthEventDimensions::DeviceType,
AuthEventDimensions::DeviceBrand,
AuthEventDimensions::DeviceOs,
AuthEventDimensions::DeviceDisplay,
AuthEventDimensions::BrowserName,
AuthEventDimensions::BrowserVersion,
AuthEventDimensions::SchemeName,
AuthEventDimensions::ExemptionRequested,
AuthEventDimensions::ExemptionAccepted,
]
.into_iter()
.map(Into::into)
.collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 19,
"total_crates": null
} |
fn_clm_analytics_get_refund_dimensions_830339345091257740 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/utils
pub fn get_refund_dimensions() -> Vec<NameDescription> {
RefundDimensions::iter().map(Into::into).collect()
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 19,
"total_crates": null
} |
fn_clm_analytics_outgoing_webhook_events_core_-6059414954761142765 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/outgoing_webhook_event/core
pub async fn outgoing_webhook_events_core(
pool: &AnalyticsProvider,
req: OutgoingWebhookLogsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<OutgoingWebhookLogsResult>> {
let data = match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"Outgoing Webhook Events Logs not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for Outgoing Webhook Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_outgoing_webhook_event(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 21,
"total_crates": null
} |
fn_clm_analytics_get_outgoing_webhook_event_3805413177523808246 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/outgoing_webhook_event/events
pub async fn get_outgoing_webhook_event<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: OutgoingWebhookLogsRequest,
pool: &T,
) -> FiltersResult<Vec<OutgoingWebhookLogsResult>>
where
T: AnalyticsDataSource + OutgoingWebhookLogsFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::OutgoingWebhookEvent);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder
.add_filter_clause("payment_id", &query_param.payment_id)
.switch()?;
if let Some(event_id) = query_param.event_id {
query_builder
.add_filter_clause("event_id", &event_id)
.switch()?;
}
if let Some(refund_id) = query_param.refund_id {
query_builder
.add_filter_clause("refund_id", &refund_id)
.switch()?;
}
if let Some(dispute_id) = query_param.dispute_id {
query_builder
.add_filter_clause("dispute_id", &dispute_id)
.switch()?;
}
if let Some(mandate_id) = query_param.mandate_id {
query_builder
.add_filter_clause("mandate_id", &mandate_id)
.switch()?;
}
if let Some(payment_method_id) = query_param.payment_method_id {
query_builder
.add_filter_clause("payment_method_id", &payment_method_id)
.switch()?;
}
if let Some(attempt_id) = query_param.attempt_id {
query_builder
.add_filter_clause("attempt_id", &attempt_id)
.switch()?;
}
    // TODO: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<OutgoingWebhookLogsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 55,
"total_crates": null
} |
fn_clm_analytics_record_operation_time_-9144294906950723961 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/metrics/request
pub async fn record_operation_time<F, R, T>(
future: F,
metric: &router_env::opentelemetry::metrics::Histogram<f64>,
metric_name: &T,
source: &crate::AnalyticsProvider,
) -> R
where
F: futures::Future<Output = R>,
T: ToString,
{
let (result, time) = common_utils::metrics::utils::time_future(future).await;
let attributes = router_env::metric_attributes!(
("metric_name", metric_name.to_string()),
("source", source.to_string()),
);
let value = time.as_secs_f64();
metric.record(value, attributes);
router_env::logger::debug!("Attributes: {:?}, Time: {}", attributes, value);
result
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 79,
"total_crates": null
} |
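A usage sketch; fetch_rows and provider are placeholders, while metrics::METRIC_FETCH_TIME is the histogram already passed at the call site shown earlier in this document:

let rows = record_operation_time(
    fetch_rows(),                 // any future whose runtime we want recorded
    &metrics::METRIC_FETCH_TIME,
    &"example_metric",            // any ToString works as the metric name
    &provider,                    // the AnalyticsProvider in scope
)
.await;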
fn_clm_analytics_get_metrics_7878981876528441595 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/core
pub async fn get_metrics(
pool: &AnalyticsProvider,
ex_rates: &Option<ExchangeRates>,
auth: &AuthInfo,
req: GetPaymentMetricRequest,
) -> AnalyticsResult<PaymentsMetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
PaymentMetricsBucketIdentifier,
PaymentMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_payments_metrics_query",
payment_metric = metric_type.as_ref()
);
// TODO: lifetime issues with joinset,
// can be optimized away if joinset lifetime requirements are relaxed
let auth_scoped = auth.to_owned();
set.spawn(
async move {
let data = pool
.get_payment_metrics(
&metric_type,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
TaskType::MetricTask(metric_type, data)
}
.instrument(task_span),
);
}
if let Some(distribution) = req.clone().distribution {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_payments_distribution_query",
payment_distribution = distribution.distribution_for.as_ref()
);
let auth_scoped = auth.to_owned();
set.spawn(
async move {
let data = pool
.get_payment_distribution(
&distribution,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
TaskType::DistributionTask(distribution.distribution_for, data)
}
.instrument(task_span),
);
}
while let Some(task_type) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
match task_type {
TaskType::MetricTask(metric, data) => {
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
PaymentMetrics::PaymentSuccessRate
| PaymentMetrics::SessionizedPaymentSuccessRate => metrics_builder
.payment_success_rate
.add_metrics_bucket(&value),
PaymentMetrics::PaymentCount | PaymentMetrics::SessionizedPaymentCount => {
metrics_builder.payment_count.add_metrics_bucket(&value)
}
PaymentMetrics::PaymentSuccessCount
| PaymentMetrics::SessionizedPaymentSuccessCount => {
metrics_builder.payment_success.add_metrics_bucket(&value)
}
PaymentMetrics::PaymentProcessedAmount
| PaymentMetrics::SessionizedPaymentProcessedAmount => {
metrics_builder.processed_amount.add_metrics_bucket(&value)
}
PaymentMetrics::AvgTicketSize
| PaymentMetrics::SessionizedAvgTicketSize => {
metrics_builder.avg_ticket_size.add_metrics_bucket(&value)
}
PaymentMetrics::RetriesCount | PaymentMetrics::SessionizedRetriesCount => {
metrics_builder.retries_count.add_metrics_bucket(&value);
metrics_builder
.retries_amount_processed
.add_metrics_bucket(&value)
}
PaymentMetrics::ConnectorSuccessRate
| PaymentMetrics::SessionizedConnectorSuccessRate => {
metrics_builder
.connector_success_rate
.add_metrics_bucket(&value);
}
PaymentMetrics::DebitRouting | PaymentMetrics::SessionizedDebitRouting => {
metrics_builder.debit_routing.add_metrics_bucket(&value);
}
PaymentMetrics::PaymentsDistribution => {
metrics_builder
.payments_distribution
.add_metrics_bucket(&value);
}
PaymentMetrics::FailureReasons => {
metrics_builder
.failure_reasons_distribution
.add_metrics_bucket(&value);
}
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
TaskType::DistributionTask(distribution, data) => {
let data = data?;
let attributes = router_env::metric_attributes!(
("distribution_type", distribution.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for distribution {distribution}");
let metrics_accumulator = metrics_accumulator.entry(id).or_default();
match distribution {
PaymentDistributions::PaymentErrorMessage => metrics_accumulator
.payment_error_message
.add_distribution_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: distribution: {}, results: {:#?}",
distribution,
metrics_accumulator
);
}
}
}
let mut total_payment_processed_amount = 0;
let mut total_payment_processed_count = 0;
let mut total_payment_processed_amount_without_smart_retries = 0;
let mut total_payment_processed_count_without_smart_retries = 0;
let mut total_failure_reasons_count = 0;
let mut total_failure_reasons_count_without_smart_retries = 0;
let mut total_payment_processed_amount_in_usd = 0;
let mut total_payment_processed_amount_without_smart_retries_usd = 0;
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let mut collected_values = val.collect();
if let Some(amount) = collected_values.payment_processed_amount {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.payment_processed_amount_in_usd = amount_in_usd;
total_payment_processed_amount += amount;
total_payment_processed_amount_in_usd += amount_in_usd.unwrap_or(0);
}
if let Some(count) = collected_values.payment_processed_count {
total_payment_processed_count += count;
}
if let Some(amount) = collected_values.payment_processed_amount_without_smart_retries {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.payment_processed_amount_without_smart_retries_usd = amount_in_usd;
total_payment_processed_amount_without_smart_retries += amount;
total_payment_processed_amount_without_smart_retries_usd +=
amount_in_usd.unwrap_or(0);
}
if let Some(count) = collected_values.payment_processed_count_without_smart_retries {
total_payment_processed_count_without_smart_retries += count;
}
if let Some(count) = collected_values.failure_reason_count {
total_failure_reasons_count += count;
}
if let Some(count) = collected_values.failure_reason_count_without_smart_retries {
total_failure_reasons_count_without_smart_retries += count;
}
if let Some(savings) = collected_values.debit_routing_savings {
let savings_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(savings)
.inspect_err(|e| {
logger::error!(
"Debit Routing savings conversion error: {:?}",
e
)
})
.ok()
.and_then(|savings_i64| {
convert(ex_rates, currency, Currency::USD, savings_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|savings| (savings * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.debit_routing_savings_in_usd = savings_in_usd;
}
MetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
Ok(PaymentsMetricsResponse {
query_data,
meta_data: [PaymentsAnalyticsMetadata {
total_payment_processed_amount: Some(total_payment_processed_amount),
total_payment_processed_amount_in_usd: if ex_rates.is_some() {
Some(total_payment_processed_amount_in_usd)
} else {
None
},
total_payment_processed_amount_without_smart_retries: Some(
total_payment_processed_amount_without_smart_retries,
),
total_payment_processed_amount_without_smart_retries_usd: if ex_rates.is_some() {
Some(total_payment_processed_amount_without_smart_retries_usd)
} else {
None
},
total_payment_processed_count: Some(total_payment_processed_count),
total_payment_processed_count_without_smart_retries: Some(
total_payment_processed_count_without_smart_retries,
),
total_failure_reasons_count: Some(total_failure_reasons_count),
total_failure_reasons_count_without_smart_retries: Some(
total_failure_reasons_count_without_smart_retries,
),
}],
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 252,
"total_crates": null
} |
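get_metrics fans each requested metric out as its own task and folds results back into an accumulator map as tasks complete. A self-contained toy of that pattern (plain tokio; the task body stands in for a real metrics query):

use std::collections::HashMap;
use tokio::task::JoinSet;

async fn fan_out(metric_names: Vec<String>) -> HashMap<String, usize> {
    let mut set = JoinSet::new();
    for name in metric_names {
        set.spawn(async move {
            let value = name.len(); // stand-in for an actual query
            (name, value)
        });
    }
    let mut accumulator = HashMap::new();
    // Drain tasks in completion order; in this toy a join error simply ends
    // the loop, whereas the real code surfaces it as AnalyticsError.
    while let Some(Ok((name, value))) = set.join_next().await {
        accumulator.insert(name, value);
    }
    accumulator
}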
fn_clm_analytics_get_filters_7878981876528441595 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/core
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetPaymentFiltersRequest,
auth: &AuthInfo,
) -> AnalyticsResult<PaymentFiltersResponse> {
let mut res = PaymentFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_payment_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_payment_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
}
            AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
                let ckh_result =
                    get_payment_filter_for_dimension(dim, auth, &req.time_range, ckh_pool)
                        .await;
                let sqlx_result =
                    get_payment_filter_for_dimension(dim, auth, &req.time_range, sqlx_pool)
                        .await;
                match (&sqlx_result, &ckh_result) {
                    (Ok(sqlx_res), Ok(ckh_res)) if sqlx_res != ckh_res => {
                        router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics filters")
                    }
                    _ => {}
                };
                ckh_result
            }
            AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
                let ckh_result =
                    get_payment_filter_for_dimension(dim, auth, &req.time_range, ckh_pool)
                        .await;
                let sqlx_result =
                    get_payment_filter_for_dimension(dim, auth, &req.time_range, sqlx_pool)
                        .await;
                match (&sqlx_result, &ckh_result) {
                    (Ok(sqlx_res), Ok(ckh_res)) if sqlx_res != ckh_res => {
                        router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payments analytics filters")
                    }
                    _ => {}
                };
                sqlx_result
            }
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: PaymentFilterRow| match dim {
PaymentDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
PaymentDimensions::PaymentStatus => fil.status.map(|i| i.as_ref().to_string()),
PaymentDimensions::Connector => fil.connector,
PaymentDimensions::AuthType => fil.authentication_type.map(|i| i.as_ref().to_string()),
PaymentDimensions::PaymentMethod => fil.payment_method,
PaymentDimensions::PaymentMethodType => fil.payment_method_type,
PaymentDimensions::ClientSource => fil.client_source,
PaymentDimensions::ClientVersion => fil.client_version,
PaymentDimensions::ProfileId => fil.profile_id,
PaymentDimensions::CardNetwork => fil.card_network,
PaymentDimensions::MerchantId => fil.merchant_id,
PaymentDimensions::CardLast4 => fil.card_last_4,
PaymentDimensions::CardIssuer => fil.card_issuer,
PaymentDimensions::ErrorReason => fil.error_reason,
PaymentDimensions::RoutingApproach => fil.routing_approach.map(|i| i.as_ref().to_string()),
PaymentDimensions::SignatureNetwork => fil.signature_network,
PaymentDimensions::IsIssuerRegulated => fil.is_issuer_regulated.map(|b| b.to_string()),
PaymentDimensions::IsDebitRouted => fil.is_debit_routed.map(|b| b.to_string())
})
.collect::<Vec<String>>();
res.query_data.push(FilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 112,
"total_crates": null
} |
fn_clm_analytics_load_distribution_8992984777719461577 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/distribution
// Implementation of PaymentDistributions for PaymentDistribution<T>
async fn load_distribution(
&self,
distribution: &PaymentDistributionBody,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>> {
match self {
Self::PaymentErrorMessage => {
PaymentErrorMessage
.load_distribution(
distribution,
dimensions,
auth,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 31,
"total_crates": null
} |
fn_clm_analytics_set_filter_clause_258350410479627385 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/types
// Implementation of PaymentFilters for QueryFilter<T>
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::Currency, &self.currency)
.attach_printable("Error adding currency filter")?;
}
if !self.status.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::PaymentStatus, &self.status)
.attach_printable("Error adding payment status filter")?;
}
if !self.connector.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::Connector, &self.connector)
.attach_printable("Error adding connector filter")?;
}
if !self.auth_type.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::AuthType, &self.auth_type)
.attach_printable("Error adding auth type filter")?;
}
if !self.payment_method.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::PaymentMethod, &self.payment_method)
.attach_printable("Error adding payment method filter")?;
}
if !self.payment_method_type.is_empty() {
builder
.add_filter_in_range_clause(
PaymentDimensions::PaymentMethodType,
&self.payment_method_type,
)
.attach_printable("Error adding payment method type filter")?;
}
if !self.client_source.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::ClientSource, &self.client_source)
.attach_printable("Error adding client source filter")?;
}
if !self.client_version.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::ClientVersion, &self.client_version)
.attach_printable("Error adding client version filter")?;
}
if !self.profile_id.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::ProfileId, &self.profile_id)
.attach_printable("Error adding profile id filter")?;
}
if !self.card_network.is_empty() {
let card_networks: Vec<String> = self
.card_network
.iter()
.flat_map(|cn| {
[
format!("\"{cn}\""),
cn.to_string(),
format!("\"{cn}\"").to_uppercase(),
]
})
.collect();
builder
.add_filter_in_range_clause(
PaymentDimensions::CardNetwork,
card_networks.as_slice(),
)
.attach_printable("Error adding card network filter")?;
}
if !self.merchant_id.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::MerchantId, &self.merchant_id)
.attach_printable("Error adding merchant id filter")?;
}
if !self.card_last_4.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::CardLast4, &self.card_last_4)
.attach_printable("Error adding card last 4 filter")?;
}
if !self.card_issuer.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::CardIssuer, &self.card_issuer)
.attach_printable("Error adding card issuer filter")?;
}
if !self.error_reason.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::ErrorReason, &self.error_reason)
.attach_printable("Error adding error reason filter")?;
}
if !self.first_attempt.is_empty() {
builder
.add_filter_in_range_clause("first_attempt", &self.first_attempt)
.attach_printable("Error adding first attempt filter")?;
}
if !self.routing_approach.is_empty() {
builder
.add_filter_in_range_clause(
PaymentDimensions::RoutingApproach,
&self.routing_approach,
)
.attach_printable("Error adding routing approach filter")?;
}
if !self.signature_network.is_empty() {
builder
.add_filter_in_range_clause(
PaymentDimensions::SignatureNetwork,
&self.signature_network,
)
.attach_printable("Error adding signature network filter")?;
}
if !self.is_issuer_regulated.is_empty() {
builder
.add_filter_in_range_clause(
PaymentDimensions::IsIssuerRegulated,
&self.is_issuer_regulated,
)
.attach_printable("Error adding is issuer regulated filter")?;
}
if !self.is_debit_routed.is_empty() {
builder
.add_filter_in_range_clause(PaymentDimensions::IsDebitRouted, &self.is_debit_routed)
.attach_printable("Error adding is debit routed filter")?;
}
Ok(())
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 824,
"total_crates": null
} |
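The card_network branch above is the only filter that rewrites its values: each network is matched in three spellings to tolerate how different sources serialize it. Concretely:

let cn = "Visa";
let variants = [
    format!("\"{cn}\""),                // "\"Visa\""  (JSON-quoted)
    cn.to_string(),                     // "Visa"      (bare)
    format!("\"{cn}\"").to_uppercase(), // "\"VISA\""  (quoted, uppercased)
];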
fn_clm_analytics_load_metrics_4712289430113084010 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics
// Implementation of PaymentMetrics for PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
match self {
Self::PaymentSuccessRate => {
PaymentSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentCount => {
PaymentCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentSuccessCount => {
PaymentSuccessCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentProcessedAmount => {
PaymentProcessedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::AvgTicketSize => {
AvgTicketSize
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::RetriesCount => {
RetriesCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::ConnectorSuccessRate => {
ConnectorSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::DebitRouting => {
DebitRouting
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentSuccessRate => {
sessionized_metrics::PaymentSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentCount => {
sessionized_metrics::PaymentCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentSuccessCount => {
sessionized_metrics::PaymentSuccessCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentProcessedAmount => {
sessionized_metrics::PaymentProcessedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedAvgTicketSize => {
sessionized_metrics::AvgTicketSize
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedRetriesCount => {
sessionized_metrics::RetriesCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedConnectorSuccessRate => {
sessionized_metrics::ConnectorSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentsDistribution => {
sessionized_metrics::PaymentsDistribution
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::FailureReasons => {
sessionized_metrics::FailureReasons
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedDebitRouting => {
sessionized_metrics::DebitRouting
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 314,
"total_crates": null
} |
fn_clm_analytics_get_payment_filter_for_dimension_3042809228084294715 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/filters
pub async fn get_payment_filter_for_dimension<T>(
dimension: PaymentDimensions,
auth: &AuthInfo,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<PaymentFilterRow>>
where
T: AnalyticsDataSource + PaymentFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<PaymentFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 50,
"total_crates": null
} |
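A usage sketch mirroring the calls in get_filters above, written as a fragment inside an async function that can propagate FiltersError; auth, time_range, and ckh_pool are whatever values are in scope at the call site:

let currency_rows = get_payment_filter_for_dimension(
    PaymentDimensions::Currency,
    auth,       // &AuthInfo
    time_range, // &TimeRange
    ckh_pool,   // &T satisfying the bounds in the signature above
)
.await?;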
fn_clm_analytics_collect_2936854367929365132 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/accumulator
// Inherent implementation for PaymentMetricsAccumulator
pub fn collect(self) -> PaymentMetricsBucketValue {
let (
payment_processed_amount,
payment_processed_count,
payment_processed_amount_without_smart_retries,
payment_processed_count_without_smart_retries,
payment_processed_amount_in_usd,
payment_processed_amount_without_smart_retries_usd,
) = self.processed_amount.collect();
let (
payments_success_rate_distribution,
payments_success_rate_distribution_without_smart_retries,
payments_success_rate_distribution_with_only_retries,
payments_failure_rate_distribution,
payments_failure_rate_distribution_without_smart_retries,
payments_failure_rate_distribution_with_only_retries,
) = self.payments_distribution.collect();
let (failure_reason_count, failure_reason_count_without_smart_retries) =
self.failure_reasons_distribution.collect();
let (debit_routed_transaction_count, debit_routing_savings, debit_routing_savings_in_usd) =
self.debit_routing.collect();
PaymentMetricsBucketValue {
payment_success_rate: self.payment_success_rate.collect(),
payment_count: self.payment_count.collect(),
payment_success_count: self.payment_success.collect(),
payment_processed_amount,
payment_processed_count,
payment_processed_amount_without_smart_retries,
payment_processed_count_without_smart_retries,
avg_ticket_size: self.avg_ticket_size.collect(),
payment_error_message: self.payment_error_message.collect(),
retries_count: self.retries_count.collect(),
retries_amount_processed: self.retries_amount_processed.collect(),
connector_success_rate: self.connector_success_rate.collect(),
payments_success_rate_distribution,
payments_success_rate_distribution_without_smart_retries,
payments_success_rate_distribution_with_only_retries,
payments_failure_rate_distribution,
payments_failure_rate_distribution_without_smart_retries,
payments_failure_rate_distribution_with_only_retries,
failure_reason_count,
failure_reason_count_without_smart_retries,
payment_processed_amount_in_usd,
payment_processed_amount_without_smart_retries_usd,
debit_routed_transaction_count,
debit_routing_savings,
debit_routing_savings_in_usd,
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1455,
"total_crates": null
} |
fn_clm_analytics_add_metrics_bucket_2936854367929365132 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/accumulator
// Implementation of AverageAccumulator for PaymentMetricAccumulator
fn add_metrics_bucket(&mut self, metrics: &PaymentMetricRow) {
let total = metrics.total.as_ref().and_then(ToPrimitive::to_u32);
    let count = metrics.count.and_then(|count| u32::try_from(count).ok());
match (total, count) {
(Some(total), Some(count)) => {
self.total += total;
self.count += count;
}
_ => {
logger::error!(message="Dropping metrics for average accumulator", metric=?metrics);
}
}
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 168,
"total_crates": null
} |
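The contract above, distilled: a bucket contributes to the average only when both its total and count are present; otherwise it is dropped (and logged) rather than skewing the result. A self-contained toy equivalent:

#[derive(Default)]
struct Avg {
    total: u32,
    count: u32,
}

impl Avg {
    fn add_bucket(&mut self, total: Option<u32>, count: Option<u32>) {
        match (total, count) {
            (Some(t), Some(c)) => {
                self.total += t;
                self.count += c;
            }
            // The real accumulator logs and skips the bucket here.
            _ => {}
        }
    }

    fn collect(&self) -> Option<f64> {
        (self.count > 0).then(|| f64::from(self.total) / f64::from(self.count))
    }
}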
fn_clm_analytics_add_distribution_bucket_2936854367929365132 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/accumulator
// Implementation of ErrorDistributionAccumulator for PaymentDistributionAccumulator
fn add_distribution_bucket(&mut self, distribution: &PaymentDistributionRow) {
self.error_vec.push(ErrorDistributionRow {
count: distribution.count.unwrap_or_default(),
total: distribution
.total
.clone()
.map(|i| i.to_i64().unwrap_or_default())
.unwrap_or_default(),
            error_message: distribution.error_message.clone().unwrap_or_default(),
})
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 37,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-3643584475922647598 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/connector_success_rate
// Implementation of ConnectorSuccessRate for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause(PaymentDimensions::Connector, "NULL", FilterTypes::IsNotNull)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 396,
"total_crates": null
} |
fn_clm_analytics_load_metrics_8455848674610677125 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/retries_count
// Implementation of RetriesCount for super::PaymentMetric<T>
async fn load_metrics(
&self,
_dimensions: &[PaymentDimensions],
auth: &AuthInfo,
_filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
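// A retried success is an intent that ended up succeeded after more than one attempt.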
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 382,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-5690155128889549279 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/success_rate
// Implementation of PaymentSuccessRate for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
let mut dimensions = dimensions.to_vec();
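// Overall success rate also needs the status breakdown, so group by payment status in addition to the requested dimensions.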
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-8743522395397081442 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/avg_ticket_size
// Implementation of AvgTicketSize for PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
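// Only charged attempts count towards ticket size; the average itself is presumably computed as total / count by the caller.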
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 400,
"total_crates": null
} |
fn_clm_analytics_load_metrics_1662836059822213116 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/debit_routing
// Implementation of DebitRouting for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
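// Here `total` aggregates the savings attributed to debit routing rather than the payment amount.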
query_builder
.add_select_column(Aggregate::Sum {
field: "debit_routing_savings",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
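// Savings are only counted for attempts that were actually charged.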
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 406,
"total_crates": null
} |
fn_clm_analytics_load_metrics_4909920767041659234 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/payment_count
// Implementation of PaymentCount for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
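// No status filter is applied: payment count covers every attempt regardless of outcome.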
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
fn_clm_analytics_load_metrics_490302644630985440 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/payment_success_count
// Implementation of PaymentSuccessCount for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
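// Restrict the count to attempts that reached the Charged state.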
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 392,
"total_crates": null
} |
fn_clm_analytics_load_metrics_-8811167062278585568 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/payment_processed_amount
// Implementation of PaymentProcessedAmount for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 402,
"total_crates": null
} |
fn_clm_analytics_load_metrics_8412783985974037779 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/failure_reasons
// Implementation of FailureReasons for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
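// Two-level query: an inner subquery computes the overall total of rows carrying an error reason in the window; the outer query embeds it as a "total" column alongside each per-reason "count".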
let mut inner_query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
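// sum(sign_flag) nets out paired insert/cancel rows; assumed to be the usual ClickHouse CollapsingMergeTree sign column given the name.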
inner_query_builder
.add_select_column("sum(sign_flag)")
.switch()?;
inner_query_builder
.add_custom_filter_clause(
PaymentDimensions::ErrorReason,
"NULL",
FilterTypes::IsNotNull,
)
.switch()?;
time_range
.set_filter_clause(&mut inner_query_builder)
.attach_printable("Error filtering time range for inner query")
.switch()?;
let inner_query_string = inner_query_builder
.build_query()
.attach_printable("Error building inner query")
.change_context(MetricsError::QueryBuildingError)?;
let mut outer_query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
outer_query_builder.add_select_column(dim).switch()?;
}
outer_query_builder
.add_select_column("sum(sign_flag) AS count")
.switch()?;
outer_query_builder
.add_select_column(format!("({inner_query_string}) AS total"))
.switch()?;
outer_query_builder
.add_select_column("first_attempt")
.switch()?;
outer_query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
outer_query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters
.set_filter_clause(&mut outer_query_builder)
.switch()?;
auth.set_filter_clause(&mut outer_query_builder).switch()?;
time_range
.set_filter_clause(&mut outer_query_builder)
.attach_printable("Error filtering time range for outer query")
.switch()?;
outer_query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Failure,
)
.switch()?;
outer_query_builder
.add_custom_filter_clause(
PaymentDimensions::ErrorReason,
"NULL",
FilterTypes::IsNotNull,
)
.switch()?;
for dim in dimensions.iter() {
outer_query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
outer_query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut outer_query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
outer_query_builder
.add_order_by_clause("count", Order::Descending)
.attach_printable("Error adding order by clause")
.switch()?;
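// Rank reasons by descending count, then order by the remaining dimensions (error reason excluded) for a stable layout.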
let filtered_dimensions: Vec<&PaymentDimensions> = dimensions
.iter()
.filter(|&&dim| dim != PaymentDimensions::ErrorReason)
.collect();
for dim in &filtered_dimensions {
outer_query_builder
.add_order_by_clause(*dim, Order::Ascending)
.attach_printable("Error adding order by clause")
.switch()?;
}
outer_query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 450,
"total_crates": null
} |
fn_clm_analytics_load_metrics_858105518302950045 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/connector_success_rate
// Implementation of ConnectorSuccessRate for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
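// Per-connector rates are undefined when no connector was recorded, so drop those rows.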
query_builder
.add_custom_filter_clause(PaymentDimensions::Connector, "NULL", FilterTypes::IsNotNull)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 396,
"total_crates": null
} |
fn_clm_analytics_load_metrics_6270694272930952615 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/retries_count
// Implementation of RetriesCount for super::PaymentMetric<T>
async fn load_metrics(
&self,
_dimensions: &[PaymentDimensions],
auth: &AuthInfo,
_filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
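// Retried successes: intents with more than one attempt whose final status is succeeded.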
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 382,
"total_crates": null
} |
fn_clm_analytics_load_metrics_3635360665670442104 | clm | function | // Repository: hyperswitch
// Crate: analytics
// Purpose: Event logging with Kafka and ClickHouse
// Module: crates/analytics/src/payments/metrics/sessionized_metrics/payments_distribution
// Implementation of PaymentsDistribution for super::PaymentMetric<T>
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
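// Select the first_attempt flag so the distribution can split first-attempt successes from retry successes.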
query_builder.add_select_column("first_attempt").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
| {
"crate": "analytics",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 402,
"total_crates": null
} |