id stringlengths 11 116 | type stringclasses 1 value | granularity stringclasses 4 values | content stringlengths 16 477k | metadata dict |
|---|---|---|---|---|
fn_clm_diesel_models_find_by_merchant_id_5045372760619588473 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/merchant_key_store
// Inherent implementation for MerchantKeyStore
pub async fn find_by_merchant_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id.eq(merchant_id.to_owned()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_diesel_models_delete_by_merchant_id_5045372760619588473 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/merchant_key_store
// Inherent implementation for MerchantKeyStore
pub async fn delete_by_merchant_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<bool> {
generics::generic_delete::<<Self as HasTable>::Table, _>(
conn,
dsl::merchant_id.eq(merchant_id.to_owned()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_diesel_models_insert_-1356326283614083829 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_key_store
// Inherent implementation for UserKeyStoreNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<UserKeyStore> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_get_all_user_key_stores_-1356326283614083829 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_key_store
// Inherent implementation for UserKeyStore
pub async fn get_all_user_key_stores(
conn: &PgPooledConn,
from: u32,
limit: u32,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<
<Self as HasTable>::Table,
_,
<<Self as HasTable>::Table as diesel::Table>::PrimaryKey,
_,
>(
conn,
dsl::user_id.ne_all(vec!["".to_string()]),
Some(limit.into()),
Some(from.into()),
None,
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 24,
"total_crates": null
} |
fn_clm_diesel_models_find_by_user_id_-1356326283614083829 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_key_store
// Inherent implementation for UserKeyStore
pub async fn find_by_user_id(conn: &PgPooledConn, user_id: &str) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::user_id.eq(user_id.to_owned()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_diesel_models_insert_257616260232684217 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/reverse_lookup
// Inherent implementation for ReverseLookupNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<ReverseLookup> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_find_by_lookup_id_257616260232684217 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/reverse_lookup
// Inherent implementation for ReverseLookup
pub async fn find_by_lookup_id(lookup_id: &str, conn: &PgPooledConn) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::lookup_id.eq(lookup_id.to_owned()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_diesel_models_batch_insert_257616260232684217 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/reverse_lookup
// Inherent implementation for ReverseLookupNew
pub async fn batch_insert(
reverse_lookups: Vec<Self>,
conn: &PgPooledConn,
) -> StorageResult<()> {
generics::generic_insert::<_, _, ReverseLookup>(conn, reverse_lookups).await?;
Ok(())
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 18,
"total_crates": null
} |
fn_clm_diesel_models_insert_9030172355535656370 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/relay
// Inherent implementation for RelayNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<Relay> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_update_9030172355535656370 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/relay
// Inherent implementation for Relay
pub async fn update(
self,
conn: &PgPooledConn,
relay: RelayUpdateInternal,
) -> StorageResult<Self> {
match generics::generic_update_with_unique_predicate_get_result::<
<Self as HasTable>::Table,
_,
_,
_,
>(conn, dsl::id.eq(self.id.to_owned()), relay)
.await
{
Err(error) => match error.current_context() {
errors::DatabaseError::NoFieldsToUpdate => Ok(self),
_ => Err(error),
},
result => result,
}
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 93,
"total_crates": null
} |
fn_clm_diesel_models_find_by_profile_id_connector_reference_id_9030172355535656370 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/relay
// Inherent implementation for Relay
pub async fn find_by_profile_id_connector_reference_id(
conn: &PgPooledConn,
profile_id: &common_utils::id_type::ProfileId,
connector_reference_id: &str,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::profile_id
.eq(profile_id.to_owned())
.and(dsl::connector_reference_id.eq(connector_reference_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_find_by_id_9030172355535656370 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/relay
// Inherent implementation for Relay
pub async fn find_by_id(
conn: &PgPooledConn,
id: &common_utils::id_type::RelayId,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::id.eq(id.to_owned()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 25,
"total_crates": null
} |
fn_clm_diesel_models_insert_-8867065366149239893 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/mandate
// Inherent implementation for MandateNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<Mandate> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_update_by_merchant_id_mandate_id_-8867065366149239893 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/mandate
// Inherent implementation for Mandate
pub async fn update_by_merchant_id_mandate_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
mandate_id: &str,
mandate: MandateUpdateInternal,
) -> StorageResult<Self> {
generics::generic_update_with_results::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::mandate_id.eq(mandate_id.to_owned())),
mandate,
)
.await?
.first()
.cloned()
.ok_or_else(|| {
report!(errors::DatabaseError::NotFound)
.attach_printable("Error while updating mandate")
})
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 39,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_id_mandate_id_-8867065366149239893 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/mandate
// Inherent implementation for Mandate
pub async fn find_by_merchant_id_mandate_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
mandate_id: &str,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::mandate_id.eq(mandate_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_id_connector_mandate_id_-8867065366149239893 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/mandate
// Inherent implementation for Mandate
pub async fn find_by_merchant_id_connector_mandate_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
connector_mandate_id: &str,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::connector_mandate_id.eq(connector_mandate_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_id_customer_id_-8867065366149239893 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/mandate
// Inherent implementation for Mandate
pub async fn find_by_merchant_id_customer_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
customer_id: &common_utils::id_type::CustomerId,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<
<Self as HasTable>::Table,
_,
<<Self as HasTable>::Table as Table>::PrimaryKey,
_,
>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::customer_id.eq(customer_id.to_owned())),
None,
None,
None,
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_get_local_unique_key_-3735230638181464129 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/utils
// Implementation of <schema_v2::blocklist::table as diesel::Table>::PrimaryKey for CompositeKey
fn get_local_unique_key(&self) -> Self::UK {
self.1
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 8,
"total_crates": null
} |
fn_clm_diesel_models_insert_-7217105875194163693 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/payment_intent
// Inherent implementation for PaymentIntentNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<PaymentIntent> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_update_-7217105875194163693 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/payment_intent
// Inherent implementation for PaymentIntent
pub async fn update(
self,
conn: &PgPooledConn,
payment_intent: payment_intent::PaymentIntentUpdate,
) -> StorageResult<Self> {
match generics::generic_update_with_results::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::payment_id
.eq(self.payment_id.to_owned())
.and(dsl::merchant_id.eq(self.merchant_id.to_owned())),
payment_intent::PaymentIntentUpdateInternal::from(payment_intent),
)
.await
{
Err(error) => match error.current_context() {
errors::DatabaseError::NoFieldsToUpdate => Ok(self),
_ => Err(error),
},
Ok(mut payment_intents) => payment_intents
.pop()
.ok_or(error_stack::report!(errors::DatabaseError::NotFound)),
}
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 105,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_reference_id_profile_id_-7217105875194163693 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/payment_intent
// Inherent implementation for PaymentIntent
pub async fn find_by_merchant_reference_id_profile_id(
conn: &PgPooledConn,
merchant_reference_id: &common_utils::id_type::PaymentReferenceId,
profile_id: &common_utils::id_type::ProfileId,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::profile_id
.eq(profile_id.to_owned())
.and(dsl::merchant_reference_id.eq(merchant_reference_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 31,
"total_crates": null
} |
fn_clm_diesel_models_find_by_payment_id_merchant_id_-7217105875194163693 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/payment_intent
// Inherent implementation for PaymentIntent
pub async fn find_by_payment_id_merchant_id(
conn: &PgPooledConn,
payment_id: &common_utils::id_type::PaymentId,
merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::payment_id.eq(payment_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 31,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_reference_id_merchant_id_-7217105875194163693 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/payment_intent
// Inherent implementation for PaymentIntent
pub async fn find_by_merchant_reference_id_merchant_id(
conn: &PgPooledConn,
merchant_reference_id: &str,
merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::merchant_reference_id.eq(merchant_reference_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_insert_-7910549288161703268 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/capture
// Inherent implementation for CaptureNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<Capture> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_get_optional_connector_transaction_id_-7910549288161703268 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/capture
// Implementation of Capture for ConnectorTransactionIdTrait
fn get_optional_connector_transaction_id(&self) -> Option<&String> {
match self
.connector_capture_id
.as_ref()
.map(|capture_id| capture_id.get_txn_id(self.processor_capture_data.as_ref()))
.transpose()
{
Ok(capture_id) => capture_id,
// In case hashed data is missing from DB, use the hashed ID as connector transaction ID
Err(_) => self
.connector_capture_id
.as_ref()
.map(|txn_id| txn_id.get_id()),
}
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 42,
"total_crates": null
} |
fn_clm_diesel_models_find_all_by_merchant_id_payment_id_authorized_attempt_id_-7910549288161703268 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/capture
// Inherent implementation for Capture
pub async fn find_all_by_merchant_id_payment_id_authorized_attempt_id(
merchant_id: &common_utils::id_type::MerchantId,
payment_id: &common_utils::id_type::PaymentId,
authorized_attempt_id: &str,
conn: &PgPooledConn,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::authorized_attempt_id
.eq(authorized_attempt_id.to_owned())
.and(dsl::merchant_id.eq(merchant_id.to_owned()))
.and(dsl::payment_id.eq(payment_id.to_owned())),
None,
None,
Some(dsl::created_at.asc()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_update_with_capture_id_-7910549288161703268 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/capture
// Inherent implementation for Capture
pub async fn update_with_capture_id(
self,
conn: &PgPooledConn,
capture: CaptureUpdate,
) -> StorageResult<Self> {
match generics::generic_update_with_unique_predicate_get_result::<
<Self as HasTable>::Table,
_,
_,
_,
>(
conn,
dsl::capture_id.eq(self.capture_id.to_owned()),
CaptureUpdateInternal::from(capture),
)
.await
{
Err(error) => match error.current_context() {
errors::DatabaseError::NoFieldsToUpdate => Ok(self),
_ => Err(error),
},
result => result,
}
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 26,
"total_crates": null
} |
fn_clm_diesel_models_find_by_capture_id_-7910549288161703268 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/capture
// Inherent implementation for Capture
pub async fn find_by_capture_id(conn: &PgPooledConn, capture_id: &str) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::capture_id.eq(capture_id.to_owned()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_diesel_models_insert_-400598811677313474 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/authorization
// Inherent implementation for AuthorizationNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<Authorization> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_update_by_merchant_id_authorization_id_-400598811677313474 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/authorization
// Inherent implementation for Authorization
pub async fn update_by_merchant_id_authorization_id(
conn: &PgPooledConn,
merchant_id: common_utils::id_type::MerchantId,
authorization_id: String,
authorization_update: AuthorizationUpdate,
) -> StorageResult<Self> {
match generics::generic_update_with_unique_predicate_get_result::<
<Self as HasTable>::Table,
_,
_,
_,
>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::authorization_id.eq(authorization_id.to_owned())),
AuthorizationUpdateInternal::from(authorization_update),
)
.await
{
Err(error) => match error.current_context() {
errors::DatabaseError::NotFound => Err(error.attach_printable(
"Authorization with the given Authorization ID does not exist",
)),
errors::DatabaseError::NoFieldsToUpdate => {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::authorization_id.eq(authorization_id.to_owned())),
)
.await
}
_ => Err(error),
},
result => result,
}
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 44,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_id_payment_id_-400598811677313474 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/authorization
// Inherent implementation for Authorization
pub async fn find_by_merchant_id_payment_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
payment_id: &common_utils::id_type::PaymentId,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::payment_id.eq(payment_id.to_owned())),
None,
None,
Some(dsl::created_at.asc()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 30,
"total_crates": null
} |
fn_clm_diesel_models_insert_102172491290155872 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_authentication_method
// Inherent implementation for UserAuthenticationMethodNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<UserAuthenticationMethod> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_update_user_authentication_method_102172491290155872 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_authentication_method
// Inherent implementation for UserAuthenticationMethod
pub async fn update_user_authentication_method(
conn: &PgPooledConn,
id: &str,
user_authentication_method_update: UserAuthenticationMethodUpdate,
) -> StorageResult<Self> {
generics::generic_update_with_unique_predicate_get_result::<
<Self as HasTable>::Table,
_,
_,
_,
>(
conn,
dsl::id.eq(id.to_owned()),
OrgAuthenticationMethodUpdateInternal::from(user_authentication_method_update),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_list_user_authentication_methods_for_email_domain_102172491290155872 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_authentication_method
// Inherent implementation for UserAuthenticationMethod
pub async fn list_user_authentication_methods_for_email_domain(
conn: &PgPooledConn,
email_domain: &str,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::email_domain.eq(email_domain.to_owned()),
None,
None,
Some(dsl::last_modified_at.asc()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_list_user_authentication_methods_for_owner_id_102172491290155872 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_authentication_method
// Inherent implementation for UserAuthenticationMethod
pub async fn list_user_authentication_methods_for_owner_id(
conn: &PgPooledConn,
owner_id: &str,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::owner_id.eq(owner_id.to_owned()),
None,
None,
Some(dsl::last_modified_at.asc()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 33,
"total_crates": null
} |
fn_clm_diesel_models_list_user_authentication_methods_for_auth_id_102172491290155872 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user_authentication_method
// Inherent implementation for UserAuthenticationMethod
pub async fn list_user_authentication_methods_for_auth_id(
conn: &PgPooledConn,
auth_id: &str,
) -> StorageResult<Vec<Self>> {
generics::generic_filter::<<Self as HasTable>::Table, _, _, _>(
conn,
dsl::auth_id.eq(auth_id.to_owned()),
None,
None,
Some(dsl::last_modified_at.asc()),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 30,
"total_crates": null
} |
fn_clm_diesel_models_insert_-7599616204883818771 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/file
// Inherent implementation for FileMetadataNew
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<FileMetadata> {
generics::generic_insert(conn, self).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_update_-7599616204883818771 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/file
// Inherent implementation for FileMetadata
pub async fn update(
self,
conn: &PgPooledConn,
file_metadata: FileMetadataUpdate,
) -> StorageResult<Self> {
match generics::generic_update_with_unique_predicate_get_result::<
<Self as HasTable>::Table,
_,
_,
_,
>(
conn,
dsl::file_id.eq(self.file_id.to_owned()),
FileMetadataUpdateInternal::from(file_metadata),
)
.await
{
Err(error) => match error.current_context() {
errors::DatabaseError::NoFieldsToUpdate => Ok(self),
_ => Err(error),
},
result => result,
}
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 95,
"total_crates": null
} |
fn_clm_diesel_models_find_by_merchant_id_file_id_-7599616204883818771 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/file
// Inherent implementation for FileMetadata
pub async fn find_by_merchant_id_file_id(
conn: &PgPooledConn,
merchant_id: &common_utils::id_type::MerchantId,
file_id: &str,
) -> StorageResult<Self> {
generics::generic_find_one::<<Self as HasTable>::Table, _, _>(
conn,
dsl::merchant_id
.eq(merchant_id.to_owned())
.and(dsl::file_id.eq(file_id.to_owned())),
)
.await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_delete_by_merchant_id_file_id_-7599616204883818771 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/file
// Inherent implementation for FileMetadata
/// Deletes the file-metadata row identified by `(merchant_id, file_id)`,
/// returning whether deletion took place.
pub async fn delete_by_merchant_id_file_id(
    conn: &PgPooledConn,
    merchant_id: &common_utils::id_type::MerchantId,
    file_id: &str,
) -> StorageResult<bool> {
    let predicate = dsl::merchant_id
        .eq(merchant_id.to_owned())
        .and(dsl::file_id.eq(file_id.to_owned()));
    generics::generic_delete::<<Self as HasTable>::Table, _>(conn, predicate).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 28,
"total_crates": null
} |
fn_clm_diesel_models_insert_-6552833203320597897 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/theme
// Inherent implementation for ThemeNew
/// Inserts this new theme row and returns the persisted record.
pub async fn insert(self, conn: &PgPooledConn) -> StorageResult<Theme> {
    let theme_to_insert = self;
    generics::generic_insert(conn, theme_to_insert).await
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 1127,
"total_crates": null
} |
fn_clm_diesel_models_lineage_filter_-6552833203320597897 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/theme
// Inherent implementation for Theme
/// Builds a boxed SQL predicate matching themes at *exactly* the given
/// lineage level: every lineage column below the requested level must be
/// NULL. Contrast with `lineage_hierarchy_filter`, which matches the level
/// and everything below it.
fn lineage_filter(
    lineage: ThemeLineage,
) -> Box<
    dyn diesel::BoxableExpression<<Self as HasTable>::Table, Pg, SqlType = Nullable<Bool>>
        + 'static,
> {
    match lineage {
        // Tenant-level theme: org/merchant/profile must all be unset.
        ThemeLineage::Tenant { tenant_id } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.is_null())
                .and(dsl::merchant_id.is_null())
                .and(dsl::profile_id.is_null())
                .nullable(),
        ),
        // Organization-level theme: merchant/profile must be unset.
        ThemeLineage::Organization { tenant_id, org_id } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.eq(org_id))
                .and(dsl::merchant_id.is_null())
                .and(dsl::profile_id.is_null()),
        ),
        // Merchant-level theme: profile must be unset.
        ThemeLineage::Merchant {
            tenant_id,
            org_id,
            merchant_id,
        } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.eq(org_id))
                .and(dsl::merchant_id.eq(merchant_id))
                .and(dsl::profile_id.is_null()),
        ),
        // Profile-level theme: all four lineage columns must match.
        ThemeLineage::Profile {
            tenant_id,
            org_id,
            merchant_id,
            profile_id,
        } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.eq(org_id))
                .and(dsl::merchant_id.eq(merchant_id))
                .and(dsl::profile_id.eq(profile_id)),
        ),
    }
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 83,
"total_crates": null
} |
fn_clm_diesel_models_lineage_hierarchy_filter_-6552833203320597897 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/theme
// Inherent implementation for Theme
/// Matches all themes that belong to the specified hierarchy level or below.
///
/// Unlike `lineage_filter`, columns below the requested level are left
/// unconstrained, so e.g. an `Organization` lineage also matches its
/// merchant- and profile-level themes.
fn lineage_hierarchy_filter(
    lineage: ThemeLineage,
) -> Box<
    dyn diesel::BoxableExpression<<Self as HasTable>::Table, Pg, SqlType = Nullable<Bool>>
        + 'static,
> {
    match lineage {
        // Everything under the tenant.
        ThemeLineage::Tenant { tenant_id } => Box::new(dsl::tenant_id.eq(tenant_id).nullable()),
        // Everything under the organization.
        ThemeLineage::Organization { tenant_id, org_id } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.eq(org_id))
                .nullable(),
        ),
        // Everything under the merchant.
        ThemeLineage::Merchant {
            tenant_id,
            org_id,
            merchant_id,
        } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.eq(org_id))
                .and(dsl::merchant_id.eq(merchant_id))
                .nullable(),
        ),
        // Exactly this profile's lineage (nothing is below a profile).
        ThemeLineage::Profile {
            tenant_id,
            org_id,
            merchant_id,
            profile_id,
        } => Box::new(
            dsl::tenant_id
                .eq(tenant_id)
                .and(dsl::org_id.eq(org_id))
                .and(dsl::merchant_id.eq(merchant_id))
                .and(dsl::profile_id.eq(profile_id))
                .nullable(),
        ),
    }
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 59,
"total_crates": null
} |
fn_clm_diesel_models_find_most_specific_theme_in_lineage_-6552833203320597897 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/theme
// Inherent implementation for Theme
/// Fetches every theme at this lineage level or any broader (ancestor)
/// level in a single query, then returns the most specific candidate.
pub async fn find_most_specific_theme_in_lineage(
    conn: &PgPooledConn,
    lineage: ThemeLineage,
) -> StorageResult<Self> {
    // OR together an exact-level filter for the given lineage and each of
    // its higher lineages, so one round-trip collects all candidates.
    let query = <Self as HasTable>::table().into_boxed();
    let query =
        lineage
            .get_same_and_higher_lineages()
            .into_iter()
            .fold(query, |mut query, lineage| {
                query = query.or_filter(Self::lineage_filter(lineage));
                query
            });
    logger::debug!(query = %debug_query::<Pg,_>(&query).to_string());
    let data: Vec<Self> = match track_database_call::<Self, _, _>(
        query.get_results_async(conn),
        DatabaseOperation::Filter,
    )
    .await
    {
        Ok(value) => Ok(value),
        Err(err) => match err {
            DieselError::NotFound => Err(report!(err)).change_context(DatabaseError::NotFound),
            _ => Err(report!(err)).change_context(DatabaseError::Others),
        },
    }?;
    // Pick the candidate with the smallest `entity_type`.
    // NOTE(review): this assumes EntityType's Ord places more specific
    // levels first — confirm against the EntityType definition.
    data.into_iter()
        .min_by_key(|theme| theme.entity_type)
        .ok_or(report!(DatabaseError::NotFound))
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 50,
"total_crates": null
} |
fn_clm_diesel_models_find_all_by_lineage_hierarchy_-6552833203320597897 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/theme
// Inherent implementation for Theme
/// Finds all themes that match the specified lineage hierarchy
/// (the given level and everything below it).
pub async fn find_all_by_lineage_hierarchy(
    conn: &PgPooledConn,
    lineage: ThemeLineage,
) -> StorageResult<Vec<Self>> {
    let filter = Self::lineage_hierarchy_filter(lineage);
    let query = <Self as HasTable>::table().filter(filter).into_boxed();
    logger::debug!(query = %debug_query::<Pg,_>(&query).to_string());
    track_database_call::<Self, _, _>(query.get_results_async(conn), DatabaseOperation::Filter)
        .await
        .map_err(|err| match err {
            // Distinguish "no rows" from other database failures.
            DieselError::NotFound => report!(err).change_context(DatabaseError::NotFound),
            _ => report!(err).change_context(DatabaseError::Others),
        })
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 32,
"total_crates": null
} |
fn_clm_diesel_models_delete_payment_intents_-3983289827440149941 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/sample_data
/// Deletes all sample-data payment intents for the given merchant — rows
/// whose `merchant_reference_id` starts with `test_`.
///
/// Returns the deleted rows, or `DatabaseError::NotFound` when nothing
/// matched (so callers can tell a no-op from a successful cleanup).
pub async fn delete_payment_intents(
    conn: &PgPooledConn,
    merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Vec<PaymentIntent>> {
    let query = diesel::delete(<PaymentIntent>::table())
        .filter(payment_intent_dsl::merchant_id.eq(merchant_id.to_owned()))
        .filter(payment_intent_dsl::merchant_reference_id.like("test_%"));

    logger::debug!(query = %debug_query::<diesel::pg::Pg,_>(&query).to_string());

    query
        .get_results_async(conn)
        .await
        .change_context(errors::DatabaseError::Others)
        .attach_printable("Error while deleting payment intents")
        .and_then(|result| {
            // The previous `match result.len()` had an unreachable `_` arm
            // (a usize length is either 0 or > 0); a plain branch is clearer.
            if result.is_empty() {
                Err(error_stack::report!(errors::DatabaseError::NotFound)
                    .attach_printable("No records deleted"))
            } else {
                let n = result.len();
                logger::debug!("{n} records deleted");
                Ok(result)
            }
        })
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_delete_payment_attempts_-3983289827440149941 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/sample_data
/// Deletes all sample-data payment attempts for the given merchant — rows
/// whose `payment_id` starts with `test_`.
///
/// Returns the deleted rows, or `DatabaseError::NotFound` when nothing
/// matched.
pub async fn delete_payment_attempts(
    conn: &PgPooledConn,
    merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Vec<PaymentAttempt>> {
    let query = diesel::delete(<PaymentAttempt>::table())
        .filter(payment_attempt_dsl::merchant_id.eq(merchant_id.to_owned()))
        .filter(payment_attempt_dsl::payment_id.like("test_%"));

    logger::debug!(query = %debug_query::<diesel::pg::Pg,_>(&query).to_string());

    query
        .get_results_async(conn)
        .await
        .change_context(errors::DatabaseError::Others)
        .attach_printable("Error while deleting payment attempts")
        .and_then(|result| {
            // The previous `match result.len()` had an unreachable `_` arm
            // (a usize length is either 0 or > 0); a plain branch is clearer.
            if result.is_empty() {
                Err(error_stack::report!(errors::DatabaseError::NotFound)
                    .attach_printable("No records deleted"))
            } else {
                let n = result.len();
                logger::debug!("{n} records deleted");
                Ok(result)
            }
        })
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_delete_refunds_-3983289827440149941 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/sample_data
/// Deletes all sample-data refunds for the given merchant — rows whose
/// `payment_id` starts with `test_`.
///
/// Returns the deleted rows, or `DatabaseError::NotFound` when nothing
/// matched.
pub async fn delete_refunds(
    conn: &PgPooledConn,
    merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Vec<Refund>> {
    let query = diesel::delete(<Refund>::table())
        .filter(refund_dsl::merchant_id.eq(merchant_id.to_owned()))
        .filter(refund_dsl::payment_id.like("test_%"));

    logger::debug!(query = %debug_query::<diesel::pg::Pg,_>(&query).to_string());

    query
        .get_results_async(conn)
        .await
        .change_context(errors::DatabaseError::Others)
        .attach_printable("Error while deleting refunds")
        .and_then(|result| {
            // The previous `match result.len()` had an unreachable `_` arm
            // (a usize length is either 0 or > 0); a plain branch is clearer.
            if result.is_empty() {
                Err(error_stack::report!(errors::DatabaseError::NotFound)
                    .attach_printable("No records deleted"))
            } else {
                let n = result.len();
                logger::debug!("{n} records deleted");
                Ok(result)
            }
        })
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_delete_disputes_-3983289827440149941 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/sample_data
/// Deletes all sample-data disputes for the given merchant — rows whose
/// `dispute_id` starts with `test_`.
///
/// Returns the deleted rows, or `DatabaseError::NotFound` when nothing
/// matched.
pub async fn delete_disputes(
    conn: &PgPooledConn,
    merchant_id: &common_utils::id_type::MerchantId,
) -> StorageResult<Vec<Dispute>> {
    let query = diesel::delete(<Dispute>::table())
        .filter(dispute_dsl::merchant_id.eq(merchant_id.to_owned()))
        .filter(dispute_dsl::dispute_id.like("test_%"));

    logger::debug!(query = %debug_query::<diesel::pg::Pg,_>(&query).to_string());

    query
        .get_results_async(conn)
        .await
        .change_context(errors::DatabaseError::Others)
        .attach_printable("Error while deleting disputes")
        .and_then(|result| {
            // The previous `match result.len()` had an unreachable `_` arm
            // (a usize length is either 0 or > 0); a plain branch is clearer.
            if result.is_empty() {
                Err(error_stack::report!(errors::DatabaseError::NotFound)
                    .attach_printable("No records deleted"))
            } else {
                let n = result.len();
                logger::debug!("{n} records deleted");
                Ok(result)
            }
        })
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_diesel_models_insert_payment_intents_-3983289827440149941 | clm | function | // Repository: hyperswitch
// Crate: diesel_models
// Purpose: Database schema types directly mapping to PostgreSQL tables
// Module: crates/diesel_models/src/query/user/sample_data
/// Bulk-inserts a batch of new payment intents and returns the stored rows.
pub async fn insert_payment_intents(
    conn: &PgPooledConn,
    batch: Vec<PaymentIntentNew>,
) -> StorageResult<Vec<PaymentIntent>> {
    let insert_query = diesel::insert_into(<PaymentIntent>::table()).values(batch);
    logger::debug!(query = %debug_query::<diesel::pg::Pg,_>(&insert_query).to_string());
    insert_query
        .get_results_async(conn)
        .await
        .change_context(errors::DatabaseError::Others)
        .attach_printable("Error while inserting payment intents")
}
| {
"crate": "diesel_models",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 22,
"total_crates": null
} |
fn_clm_scheduler_start_producer_-5719998533283605215 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/producer
/// Top-level producer loop: periodically runs the producer flow for every
/// tenant until a shutdown notification arrives on `rx`.
///
/// Per-tenant flow errors are logged and swallowed here; only setup/signal
/// failures are returned to the caller.
pub async fn start_producer<T, U, F>(
    state: &T,
    scheduler_settings: Arc<SchedulerSettings>,
    (tx, mut rx): (mpsc::Sender<()>, mpsc::Receiver<()>),
    app_state_to_session_state: F,
) -> CustomResult<(), errors::ProcessTrackerError>
where
    F: Fn(&T, &id_type::TenantId) -> CustomResult<U, errors::ProcessTrackerError>,
    T: SchedulerAppState,
    U: SchedulerSessionState,
{
    use std::time::Duration;

    use rand::distributions::{Distribution, Uniform};

    // Random initial sleep in 0..=loop_interval staggers multiple producer
    // instances so they do not poll in lock-step.
    let mut rng = rand::thread_rng();
    // TODO: this can be removed once rand-0.9 is released
    // reference - https://github.com/rust-random/rand/issues/1326#issuecomment-1635331942
    #[allow(unknown_lints)]
    #[allow(clippy::unnecessary_fallible_conversions)]
    let timeout = Uniform::try_from(0..=scheduler_settings.loop_interval)
        .change_context(errors::ProcessTrackerError::ConfigurationError)?;
    tokio::time::sleep(Duration::from_millis(timeout.sample(&mut rng))).await;

    let mut interval =
        tokio::time::interval(Duration::from_millis(scheduler_settings.loop_interval));
    let mut shutdown_interval = tokio::time::interval(Duration::from_millis(
        scheduler_settings.graceful_shutdown_interval,
    ));

    let signal = common_utils::signals::get_allowed_signals()
        .map_err(|error| {
            logger::error!("Signal Handler Error: {:?}", error);
            errors::ProcessTrackerError::ConfigurationError
        })
        .attach_printable("Failed while creating a signals handler")?;
    let handle = signal.handle();
    // The spawned handler notifies `tx` on a signal; this loop observes it
    // through `rx.try_recv()` below.
    let task_handle =
        tokio::spawn(common_utils::signals::signal_handler(signal, tx).in_current_span());

    loop {
        match rx.try_recv() {
            // No shutdown requested: wait one tick, then run a producer pass
            // for each tenant.
            Err(mpsc::error::TryRecvError::Empty) => {
                interval.tick().await;
                let tenants = state.get_tenants();
                for tenant in tenants {
                    let session_state = app_state_to_session_state(state, &tenant)?;
                    match run_producer_flow(&session_state, &scheduler_settings).await {
                        Ok(_) => (),
                        Err(error) => {
                            // Intentionally not propagating error to caller.
                            // Any errors that occur in the producer flow must be handled here only, as
                            // this is the topmost level function which is concerned with the producer flow.
                            error!(?error);
                        }
                    }
                }
            }
            // Shutdown requested (signal received or sender dropped): wait
            // one grace interval, then stop.
            Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => {
                logger::debug!("Awaiting shutdown!");
                rx.close();
                shutdown_interval.tick().await;
                logger::info!("Terminating producer");
                break;
            }
        }
    }
    handle.close();
    task_handle
        .await
        .change_context(errors::ProcessTrackerError::UnexpectedFlow)?;
    Ok(())
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 65,
"total_crates": null
} |
fn_clm_scheduler_fetch_producer_tasks_-5719998533283605215 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/producer
/// Fetches `New` and `Pending` process-tracker tasks whose schedule time
/// falls within the configured window around "now", for the producer to
/// dispatch.
pub async fn fetch_producer_tasks(
    db: &dyn SchedulerInterface,
    conf: &SchedulerSettings,
) -> CustomResult<Vec<storage::ProcessTracker>, errors::ProcessTrackerError> {
    let now = common_utils::date_time::now();
    // Window is [now - lower_fetch_limit, now + upper_fetch_limit] seconds.
    let time_upper_limit = now
        .checked_add(Duration::seconds(conf.producer.upper_fetch_limit))
        .ok_or_else(|| {
            report!(errors::ProcessTrackerError::ConfigurationError)
                .attach_printable("Error obtaining upper limit to fetch producer tasks")
        })?;
    let time_lower_limit = now
        .checked_sub(Duration::seconds(conf.producer.lower_fetch_limit))
        .ok_or_else(|| {
            report!(errors::ProcessTrackerError::ConfigurationError)
                .attach_printable("Error obtaining lower limit to fetch producer tasks")
        })?;

    let mut new_tasks = db
        .find_processes_by_time_status(
            time_lower_limit,
            time_upper_limit,
            ProcessTrackerStatus::New,
            None,
        )
        .await
        .change_context(errors::ProcessTrackerError::ProcessFetchingFailed)?;
    let mut pending_tasks = db
        .find_processes_by_time_status(
            time_lower_limit,
            time_upper_limit,
            ProcessTrackerStatus::Pending,
            None,
        )
        .await
        .change_context(errors::ProcessTrackerError::ProcessFetchingFailed)?;

    if new_tasks.is_empty() {
        warn!("No new tasks found for producer to schedule");
    }
    if pending_tasks.is_empty() {
        warn!("No pending tasks found for producer to schedule");
    }
    new_tasks.append(&mut pending_tasks);

    // Safety: Assuming we won't deal with more than `u64::MAX` tasks at once
    #[allow(clippy::as_conversions)]
    metrics::TASKS_PICKED_COUNT.add(new_tasks.len() as u64, &[]);
    Ok(new_tasks)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 49,
"total_crates": null
} |
fn_clm_scheduler_run_producer_flow_-5719998533283605215 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/producer
/// Runs one producer iteration under a distributed lock: fetch due tasks,
/// then divide and append them as batches for consumers.
pub async fn run_producer_flow<T>(
    state: &T,
    settings: &SchedulerSettings,
) -> CustomResult<(), errors::ProcessTrackerError>
where
    T: SchedulerSessionState,
{
    // The lock ensures only one producer instance schedules at a time.
    lock_acquire_release::<_, _, _>(state.get_db().as_scheduler(), settings, move || async {
        let tasks = fetch_producer_tasks(state.get_db().as_scheduler(), settings).await?;
        debug!("Producer count of tasks {}", tasks.len());
        // [#268]: Allow task based segregation of tasks
        divide_and_append_tasks(
            state.get_db().as_scheduler(),
            SchedulerFlow::Producer,
            tasks,
            settings,
        )
        .await?;
        Ok(())
    })
    .await?;
    Ok(())
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 29,
"total_crates": null
} |
fn_clm_scheduler_as_scheduler_-7931970490439290358 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/scheduler
// Implementation of T for AsSchedulerInterface
/// Upcasts this implementor to a `&dyn SchedulerInterface` trait object.
fn as_scheduler(&self) -> &dyn SchedulerInterface {
    self
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 194,
"total_crates": null
} |
fn_clm_scheduler_start_process_tracker_-7931970490439290358 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/scheduler
/// Dispatches to the long-running loop for the requested scheduler flow.
///
/// `Producer` and `Consumer` block until shutdown; `Cleaner` is not yet
/// implemented and only logs an error.
pub async fn start_process_tracker<
    T: SchedulerAppState + 'static,
    U: SchedulerSessionState + 'static,
    F,
>(
    state: &T,
    scheduler_flow: SchedulerFlow,
    scheduler_settings: Arc<SchedulerSettings>,
    channel: (mpsc::Sender<()>, mpsc::Receiver<()>),
    runner_from_task: impl workflows::ProcessTrackerWorkflows<U> + 'static + Copy + std::fmt::Debug,
    app_state_to_session_state: F,
) -> CustomResult<(), errors::ProcessTrackerError>
where
    F: Fn(&T, &id_type::TenantId) -> CustomResult<U, errors::ProcessTrackerError>,
{
    match scheduler_flow {
        SchedulerFlow::Producer => {
            producer::start_producer(
                state,
                scheduler_settings,
                channel,
                app_state_to_session_state,
            )
            .await
        }
        SchedulerFlow::Consumer => {
            consumer::start_consumer(
                state,
                scheduler_settings,
                runner_from_task,
                channel,
                app_state_to_session_state,
            )
            .await
        }
        SchedulerFlow::Cleaner => {
            error!("This flow has not been implemented yet!");
            Ok(())
        }
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 17,
"total_crates": null
} |
fn_clm_scheduler_from_3299268442085175036 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/errors
// Implementation of ProcessTrackerError for From<error_stack::Report<T>>
/// Logs the full error report (with attached context), then collapses it to
/// a `ProcessTrackerError` via the context's `to_pt_error` conversion.
fn from(error: error_stack::Report<T>) -> Self {
    logger::error!(?error);
    error.current_context().to_pt_error()
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 2604,
"total_crates": null
} |
fn_clm_scheduler_to_pt_error_3299268442085175036 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/errors
// Implementation of ApiErrorResponse for PTError
/// Maps any API error response to the generic `EApiErrorResponse` variant,
/// discarding the specific response details.
fn to_pt_error(&self) -> ProcessTrackerError {
    ProcessTrackerError::EApiErrorResponse
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 11,
"total_crates": null
} |
fn_clm_scheduler_default_7295555580482563446 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/settings
// Implementation of DrainerSettings for Default
/// Default drainer configuration.
// NOTE(review): the interval fields appear to be milliseconds, consistent
// with how scheduler settings are consumed — confirm before relying on it.
fn default() -> Self {
    Self {
        stream_name: "DRAINER_STREAM".into(),
        num_partitions: 64,
        max_read_count: 100,
        loop_interval: 500,
        shutdown_interval: 1000,
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7705,
"total_crates": null
} |
fn_clm_scheduler_validate_7295555580482563446 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/settings
// Inherent implementation for DrainerSettings
/// Validates the drainer settings, rejecting an unset/empty stream name.
pub fn validate(&self) -> Result<(), ApplicationError> {
    if self.stream_name.is_default_or_empty() {
        Err(ApplicationError::InvalidConfigurationValueError(
            "drainer stream name must not be empty".into(),
        ))
    } else {
        Ok(())
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 227,
"total_crates": null
} |
fn_clm_scheduler_start_consumer_-8083499466557052016 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer
/// Top-level consumer loop: on each tick, runs one consumer pass for every
/// tenant, until a shutdown notification arrives on `rx`. Shutdown waits for
/// in-flight passes to drain before returning.
pub async fn start_consumer<T: SchedulerAppState + 'static, U: SchedulerSessionState + 'static, F>(
    state: &T,
    settings: sync::Arc<SchedulerSettings>,
    workflow_selector: impl workflows::ProcessTrackerWorkflows<U> + 'static + Copy + std::fmt::Debug,
    (tx, mut rx): (mpsc::Sender<()>, mpsc::Receiver<()>),
    app_state_to_session_state: F,
) -> CustomResult<(), errors::ProcessTrackerError>
where
    F: Fn(&T, &id_type::TenantId) -> CustomResult<U, errors::ProcessTrackerError>,
{
    use std::time::Duration;

    use rand::distributions::{Distribution, Uniform};

    // Random initial sleep in 0..=loop_interval staggers multiple consumer
    // instances so they do not poll in lock-step.
    let mut rng = rand::thread_rng();
    // TODO: this can be removed once rand-0.9 is released
    // reference - https://github.com/rust-random/rand/issues/1326#issuecomment-1635331942
    #[allow(unknown_lints)]
    #[allow(clippy::unnecessary_fallible_conversions)]
    let timeout = Uniform::try_from(0..=settings.loop_interval)
        .change_context(errors::ProcessTrackerError::ConfigurationError)?;
    tokio::time::sleep(Duration::from_millis(timeout.sample(&mut rng))).await;

    let mut interval = tokio::time::interval(Duration::from_millis(settings.loop_interval));
    let mut shutdown_interval =
        tokio::time::interval(Duration::from_millis(settings.graceful_shutdown_interval));

    // Counts consumer passes currently in flight; shutdown waits until this
    // drains to zero.
    let consumer_operation_counter = sync::Arc::new(atomic::AtomicU64::new(0));
    let signal = get_allowed_signals()
        .map_err(|error| {
            logger::error!(?error, "Signal Handler Error");
            errors::ProcessTrackerError::ConfigurationError
        })
        .attach_printable("Failed while creating a signals handler")?;
    let handle = signal.handle();
    // The spawned handler notifies `tx` on a signal; this loop observes it
    // through `rx.try_recv()` below.
    let task_handle =
        tokio::spawn(common_utils::signals::signal_handler(signal, tx).in_current_span());

    'consumer: loop {
        match rx.try_recv() {
            // No shutdown requested: run one consumer pass across tenants.
            Err(mpsc::error::TryRecvError::Empty) => {
                interval.tick().await;
                // A guard from env to disable the consumer
                if settings.consumer.disabled {
                    continue;
                }
                consumer_operation_counter.fetch_add(1, atomic::Ordering::SeqCst);
                let start_time = std_time::Instant::now();
                let tenants = state.get_tenants();
                for tenant in tenants {
                    let session_state = app_state_to_session_state(state, &tenant)?;
                    pt_utils::consumer_operation_handler(
                        session_state.clone(),
                        settings.clone(),
                        |error| {
                            logger::error!(?error, "Failed to perform consumer operation");
                        },
                        workflow_selector,
                    )
                    .await;
                }
                let end_time = std_time::Instant::now();
                let duration = end_time.saturating_duration_since(start_time).as_secs_f64();
                logger::debug!("Time taken to execute consumer_operation: {}s", duration);

                // `fetch_sub` returns the value *before* the decrement.
                let current_count =
                    consumer_operation_counter.fetch_sub(1, atomic::Ordering::SeqCst);
                logger::info!("Current tasks being executed: {}", current_count);
            }
            // Shutdown requested (signal received or sender dropped): poll
            // the in-flight counter until it reaches zero, then exit.
            Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => {
                logger::debug!("Awaiting shutdown!");
                rx.close();
                loop {
                    shutdown_interval.tick().await;
                    let active_tasks = consumer_operation_counter.load(atomic::Ordering::Acquire);
                    logger::info!("Active tasks: {active_tasks}");
                    match active_tasks {
                        0 => {
                            logger::info!("Terminating consumer");
                            break 'consumer;
                        }
                        _ => continue,
                    }
                }
            }
        }
    }
    handle.close();
    task_handle
        .await
        .change_context(errors::ProcessTrackerError::UnexpectedFlow)?;
    Ok(())
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 87,
"total_crates": null
} |
fn_clm_scheduler_fetch_consumer_tasks_-8083499466557052016 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer
/// Pulls batches from the Redis stream for this consumer, filters their
/// trackers down to valid business statuses, marks them `ProcessStarted` in
/// the database, and returns them with the new status mirrored in memory.
pub async fn fetch_consumer_tasks(
    db: &dyn ProcessTrackerInterface,
    redis_conn: &RedisConnectionPool,
    stream_name: &str,
    group_name: &str,
    consumer_name: &str,
) -> CustomResult<Vec<storage::ProcessTracker>, errors::ProcessTrackerError> {
    let batches = pt_utils::get_batches(redis_conn, stream_name, group_name, consumer_name).await?;
    // Returning early to avoid execution of database queries when `batches` is empty
    if batches.is_empty() {
        return Ok(Vec::new());
    }

    // Flatten all batches into one task list, dropping trackers whose
    // business status is not in the runnable set.
    let mut tasks = batches.into_iter().fold(Vec::new(), |mut acc, batch| {
        acc.extend_from_slice(
            batch
                .trackers
                .into_iter()
                .filter(|task| task.is_valid_business_status(&valid_business_statuses()))
                .collect::<Vec<_>>()
                .as_slice(),
        );
        acc
    });
    let task_ids = tasks
        .iter()
        .map(|task| task.id.to_owned())
        .collect::<Vec<_>>();
    // Persist the status transition before execution so the tasks are not
    // picked up again.
    db.process_tracker_update_process_status_by_ids(
        task_ids,
        storage::ProcessTrackerUpdate::StatusUpdate {
            status: enums::ProcessTrackerStatus::ProcessStarted,
            business_status: None,
        },
    )
    .await
    .change_context(errors::ProcessTrackerError::ProcessFetchingFailed)?;
    // Mirror the database update on the in-memory copies before returning.
    tasks
        .iter_mut()
        .for_each(|x| x.status = enums::ProcessTrackerStatus::ProcessStarted);
    Ok(tasks)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 54,
"total_crates": null
} |
fn_clm_scheduler_consumer_operations_-8083499466557052016 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer
/// Runs one consumer pass: ensures the consumer group exists, fetches tasks
/// under a fresh unique consumer name, and spawns one workflow task per
/// tracker, waiting for all of them to complete.
pub async fn consumer_operations<T: SchedulerSessionState + 'static>(
    state: &T,
    settings: &SchedulerSettings,
    workflow_selector: impl workflows::ProcessTrackerWorkflows<T> + 'static + Copy + std::fmt::Debug,
) -> CustomResult<(), errors::ProcessTrackerError> {
    let stream_name = settings.stream.clone();
    let group_name = settings.consumer.consumer_group.clone();
    // A unique consumer name per pass; the group tracks deliveries per
    // consumer.
    let consumer_name = format!("consumer_{}", Uuid::new_v4());

    // Group creation is best-effort: its result (including an
    // "already exists" error) is deliberately ignored.
    let _group_created = &mut state
        .get_db()
        .consumer_group_create(&stream_name, &group_name, &RedisEntryId::AfterLastID)
        .await;

    let mut tasks = state
        .get_db()
        .as_scheduler()
        .fetch_consumer_tasks(&stream_name, &group_name, &consumer_name)
        .await?;
    if !tasks.is_empty() {
        logger::info!("{} picked {} tasks", consumer_name, tasks.len());
    }

    let mut handler = vec![];

    for task in tasks.iter_mut() {
        let pickup_time = common_utils::date_time::now();
        pt_utils::add_histogram_metrics(&pickup_time, task, &stream_name);

        metrics::TASK_CONSUMED.add(1, &[]);
        // Each workflow runs concurrently on its own tokio task.
        handler.push(tokio::task::spawn(start_workflow(
            state.clone(),
            task.clone(),
            pickup_time,
            workflow_selector,
        )))
    }
    future::join_all(handler).await;

    Ok(())
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 49,
"total_crates": null
} |
fn_clm_scheduler_consumer_error_handler_-8083499466557052016 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer
/// Records a workflow failure: logs the error and finalizes the process
/// tracker entry with the global-error business status so it is not retried.
pub async fn consumer_error_handler(
    state: &(dyn SchedulerInterface + 'static),
    process: storage::ProcessTracker,
    error: errors::ProcessTrackerError,
) -> CustomResult<(), errors::ProcessTrackerError> {
    logger::error!(pt.name=?process.name, pt.id=%process.id, ?error, "Failed to execute workflow");

    let status_update = storage::ProcessTrackerUpdate::StatusUpdate {
        status: enums::ProcessTrackerStatus::Finish,
        business_status: Some(String::from(storage::business_status::GLOBAL_ERROR)),
    };
    state
        .process_tracker_update_process_status_by_ids(vec![process.id], status_update)
        .await
        .change_context(errors::ProcessTrackerError::ProcessUpdateFailed)
        .map(|_| ())
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 34,
"total_crates": null
} |
fn_clm_scheduler_start_workflow_-8083499466557052016 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer
/// Executes a single process-tracker workflow, tagging the tracing span with
/// a fresh workflow id and bumping the processed-task metric regardless of
/// the outcome. Failures are logged and returned to the caller.
pub async fn start_workflow<T>(
    state: T,
    process: storage::ProcessTracker,
    _pickup_time: PrimitiveDateTime,
    workflow_selector: impl workflows::ProcessTrackerWorkflows<T> + 'static + std::fmt::Debug,
) -> CustomResult<(), errors::ProcessTrackerError>
where
    T: SchedulerSessionState,
{
    tracing::Span::current().record("workflow_id", Uuid::new_v4().to_string());
    logger::info!(pt.name=?process.name, pt.id=%process.id);

    let result = workflow_selector
        .trigger_workflow(&state.clone(), process.clone())
        .await;
    if let Err(error) = &result {
        logger::error!(?error, "Failed to trigger workflow");
    }
    metrics::TASK_PROCESSED.add(1, &[]);
    result
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 31,
"total_crates": null
} |
fn_clm_scheduler_get_batches_9714673556109799 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/utils
/// Reads one undelivered entry from the Redis stream for this consumer
/// group, decodes it into process-tracker batches, then acknowledges and
/// deletes the consumed entries.
///
/// An empty/unavailable stream is not an error: it yields `Ok(vec![])`.
pub async fn get_batches(
    conn: &RedisConnectionPool,
    stream_name: &str,
    group_name: &str,
    consumer_name: &str,
) -> CustomResult<Vec<ProcessTrackerBatch>, errors::ProcessTrackerError> {
    let response = match conn
        .stream_read_with_options(
            stream_name,
            RedisEntryId::UndeliveredEntryID,
            // Update logic for collecting to Vec and flattening, if count > 1 is provided
            Some(1),
            None,
            Some((group_name, consumer_name)),
        )
        .await
    {
        Ok(response) => response,
        Err(error) => {
            // An empty stream is expected between ticks; anything else is a
            // genuine failure.
            if let redis_interface::errors::RedisError::StreamEmptyOrNotAvailable =
                error.current_context()
            {
                logger::debug!("No batches processed as stream is empty");
                return Ok(Vec::new());
            } else {
                return Err(error.change_context(errors::ProcessTrackerError::BatchNotFound));
            }
        }
    };
    metrics::BATCHES_CONSUMED.add(1, &[]);

    // Decode each stream entry into a batch while remembering its Redis
    // entry id, so the entries can be acknowledged and deleted afterwards.
    let (batches, entry_ids): (Vec<Vec<ProcessTrackerBatch>>, Vec<Vec<String>>) = response.into_values().map(|entries| {
        entries.into_iter().try_fold(
            (Vec::new(), Vec::new()),
            |(mut batches, mut entry_ids), entry| {
                // Redis entry ID
                entry_ids.push(entry.0);
                // Value HashMap
                batches.push(ProcessTrackerBatch::from_redis_stream_entry(entry.1)?);
                Ok((batches, entry_ids))
            },
        )
    }).collect::<CustomResult<Vec<(Vec<ProcessTrackerBatch>, Vec<String>)>, errors::ProcessTrackerError>>()?
    .into_iter()
    .unzip();
    // Flattening the Vec's since the count provided above is 1. This needs to be updated if a
    // count greater than 1 is provided.
    let batches = batches.into_iter().flatten().collect::<Vec<_>>();
    let entry_ids = entry_ids.into_iter().flatten().collect::<Vec<_>>();

    // Acknowledge first, then delete, so the group no longer considers the
    // entries pending before they are removed from the stream.
    conn.stream_acknowledge_entries(&stream_name.into(), group_name, entry_ids.clone())
        .await
        .map_err(|error| {
            logger::error!(?error, "Error acknowledging batch in stream");
            error.change_context(errors::ProcessTrackerError::BatchUpdateFailed)
        })?;
    conn.stream_delete_entries(&stream_name.into(), entry_ids.clone())
        .await
        .map_err(|error| {
            logger::error!(?error, "Error deleting batch from stream");
            error.change_context(errors::ProcessTrackerError::BatchDeleteFailed)
        })?;
    Ok(batches)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 73,
"total_crates": null
} |
fn_clm_scheduler_update_status_and_append_9714673556109799 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/utils
/// Transitions a batch's tasks according to the scheduler flow, then appends
/// the batch to its Redis stream.
///
/// * `Producer` flow: marks every task in the batch as `Processing`.
/// * `Cleaner` flow: reinitializes the tasks (limbo recovery) at the current time.
/// * Any other flow is unexpected and yields `UnexpectedFlow`.
///
/// If the stream append fails, the tasks' status is updated once more and any
/// secondary failure is attached to the returned error.
/// NOTE(review): the failure path re-applies the same `Processing` status —
/// presumably a compensating touch/update rather than a rollback; verify.
pub async fn update_status_and_append<T>(
    state: &T,
    flow: SchedulerFlow,
    pt_batch: ProcessTrackerBatch,
) -> CustomResult<(), errors::ProcessTrackerError>
where
    T: SchedulerInterface + Send + Sync + ?Sized,
{
    let process_ids: Vec<String> = pt_batch
        .trackers
        .iter()
        .map(|process| process.id.to_owned())
        .collect();
    match flow {
        SchedulerFlow::Producer => state
            .process_tracker_update_process_status_by_ids(
                process_ids,
                storage::ProcessTrackerUpdate::StatusUpdate {
                    status: ProcessTrackerStatus::Processing,
                    business_status: None,
                },
            )
            .await
            .map_or_else(
                |error| {
                    logger::error!(?error, "Error while updating process status");
                    Err(error.change_context(errors::ProcessTrackerError::ProcessUpdateFailed))
                },
                |count| {
                    logger::debug!("Updated status of {count} processes");
                    Ok(())
                },
            ),
        SchedulerFlow::Cleaner => {
            let res = state
                .reinitialize_limbo_processes(process_ids, common_utils::date_time::now())
                .await;
            match res {
                Ok(count) => {
                    logger::debug!("Reinitialized {count} processes");
                    Ok(())
                }
                Err(error) => {
                    logger::error!(?error, "Error while reinitializing processes");
                    Err(error.change_context(errors::ProcessTrackerError::ProcessUpdateFailed))
                }
            }
        }
        _ => {
            let error = format!("Unexpected scheduler flow {flow:?}");
            logger::error!(%error);
            Err(report!(errors::ProcessTrackerError::UnexpectedFlow).attach_printable(error))
        }
    }?;
    // Status update succeeded; now push the batch onto the Redis stream.
    let field_value_pairs = pt_batch.to_redis_field_value_pairs()?;
    match state
        .stream_append_entry(
            &pt_batch.stream_name,
            &RedisEntryId::AutoGeneratedID,
            field_value_pairs,
        )
        .await
        .change_context(errors::ProcessTrackerError::BatchInsertionFailed)
    {
        Ok(x) => Ok(x),
        Err(mut err) => {
            // Append failed: best-effort follow-up status update; a failure
            // here is attached to the primary error rather than replacing it.
            let update_res = state
                .process_tracker_update_process_status_by_ids(
                    pt_batch
                        .trackers
                        .iter()
                        .map(|process| process.id.clone())
                        .collect(),
                    storage::ProcessTrackerUpdate::StatusUpdate {
                        status: ProcessTrackerStatus::Processing,
                        business_status: None,
                    },
                )
                .await
                .map_or_else(
                    |error| {
                        logger::error!(?error, "Error while updating process status");
                        Err(error.change_context(errors::ProcessTrackerError::ProcessUpdateFailed))
                    },
                    |count| {
                        logger::debug!("Updated status of {count} processes");
                        Ok(())
                    },
                );
            match update_res {
                Ok(_) => (),
                Err(inner_err) => {
                    err.extend_one(inner_err);
                }
            };
            Err(err)
        }
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 57,
"total_crates": null
} |
fn_clm_scheduler_get_time_from_delta_9714673556109799 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/utils
/// Converts an optional delay in seconds into an absolute timestamp relative
/// to now, saturating rather than overflowing on extreme deltas.
///
/// Returns `None` when no delta is supplied.
pub fn get_time_from_delta(delta: Option<i32>) -> Option<time::PrimitiveDateTime> {
    let offset = time::Duration::seconds(i64::from(delta?));
    Some(common_utils::date_time::now().saturating_add(offset))
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 41,
"total_crates": null
} |
fn_clm_scheduler_divide_into_batches_9714673556109799 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/utils
/// Splits `tasks` into `ProcessTrackerBatch`es of at most `batch_size` tasks.
///
/// All chunks produced by one call share a single freshly generated batch id;
/// stream and consumer-group names come from the scheduler settings.
pub fn divide_into_batches(
    batch_size: usize,
    tasks: Vec<storage::ProcessTracker>,
    batch_creation_time: time::PrimitiveDateTime,
    conf: &SchedulerSettings,
) -> Vec<ProcessTrackerBatch> {
    let batch_id = Uuid::new_v4().to_string();
    tasks
        .chunks(batch_size)
        .map(|chunk| ProcessTrackerBatch {
            id: batch_id.clone(),
            group_name: conf.consumer.consumer_group.clone(),
            stream_name: conf.stream.clone(),
            connection_name: String::new(),
            created_time: batch_creation_time,
            rule: String::new(), // is it required?
            trackers: chunk.to_vec(),
        })
        .collect()
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 37,
"total_crates": null
} |
fn_clm_scheduler_get_schedule_time_9714673556109799 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/utils
/// Resolves the next schedule delay (in seconds) for a connector task.
///
/// A merchant-specific mapping takes precedence over the default one. The
/// first attempt (`retry_count == 0`) uses `start_after`; subsequent retries
/// are looked up in the configured frequency table.
pub fn get_schedule_time(
    mapping: process_data::ConnectorPTMapping,
    merchant_id: &common_utils::id_type::MerchantId,
    retry_count: i32,
) -> Option<i32> {
    // Prefer a merchant-specific override; otherwise fall back to the default.
    let retry_mapping = if let Some(custom) = mapping.custom_merchant_mapping.get(merchant_id) {
        custom.clone()
    } else {
        mapping.default_mapping
    };
    if retry_count == 0 {
        return Some(retry_mapping.start_after);
    }
    get_delay(retry_count, &retry_mapping.frequencies)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 31,
"total_crates": null
} |
fn_clm_scheduler_default_-6535671408034007769 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/configs/defaults
// Implementation of super::settings::Server for Default
/// Development defaults for the scheduler HTTP server: port 8080 on
/// `localhost`, with one worker per physical CPU core.
fn default() -> Self {
    Self {
        port: 8080,
        // One worker per physical core (hyper-threads excluded).
        workers: num_cpus::get_physical(),
        host: "localhost".into(),
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7707,
"total_crates": null
} |
fn_clm_scheduler_validate_5687315576467672005 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/configs/validations
// Inherent implementation for super::settings::Server
/// Ensures the configured server host is usable.
///
/// Returns `InvalidConfigurationValueError` when the host is empty or still
/// at its default placeholder value; otherwise `Ok(())`.
pub fn validate(&self) -> Result<(), ApplicationError> {
    if self.host.is_default_or_empty() {
        Err(ApplicationError::InvalidConfigurationValueError(
            "server host must not be empty".into(),
        ))
    } else {
        Ok(())
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 227,
"total_crates": null
} |
fn_clm_scheduler_get_key_-1943856801412940284 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/queue
// Implementation of MockDb for QueueInterface
/// `MockDb` stand-in for a raw Redis key read.
///
/// The mock has no Redis backing store, so this unconditionally fails with
/// `RedisConnectionError`.
async fn get_key(&self, _key: &str) -> CustomResult<Vec<u8>, RedisError> {
    Err(RedisError::RedisConnectionError.into())
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 82,
"total_crates": null
} |
fn_clm_scheduler_consumer_group_create_-1943856801412940284 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/queue
// Implementation of MockDb for QueueInterface
/// `MockDb` stand-in for creating a Redis stream consumer group.
///
/// Not implemented for the mock; always fails with `ConsumerGroupCreateFailed`.
async fn consumer_group_create(
    &self,
    _stream: &str,
    _group: &str,
    _id: &RedisEntryId,
) -> CustomResult<(), RedisError> {
    // [#172]: Implement function for `MockDb`
    Err(RedisError::ConsumerGroupCreateFailed)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 20,
"total_crates": null
} |
fn_clm_scheduler_stream_append_entry_-1943856801412940284 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/queue
// Implementation of MockDb for QueueInterface
/// `MockDb` stand-in for appending an entry to a Redis stream.
///
/// Not implemented for the mock; always fails with `StreamAppendFailed`.
async fn stream_append_entry(
    &self,
    _stream: &str,
    _entry_id: &RedisEntryId,
    _fields: Vec<(&str, String)>,
) -> CustomResult<(), RedisError> {
    // [#172]: Implement function for `MockDb`
    Err(RedisError::StreamAppendFailed)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 20,
"total_crates": null
} |
fn_clm_scheduler_fetch_consumer_tasks_-1943856801412940284 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/queue
// Implementation of MockDb for QueueInterface
/// `MockDb` stand-in for fetching consumer tasks from the stream.
///
/// Not implemented for the mock; always fails with `ResourceFetchingFailed`.
async fn fetch_consumer_tasks(
    &self,
    _stream_name: &str,
    _group_name: &str,
    _consumer_name: &str,
) -> CustomResult<Vec<storage::ProcessTracker>, ProcessTrackerError> {
    // [#172]: Implement function for `MockDb`
    Err(ProcessTrackerError::ResourceFetchingFailed {
        resource_name: "consumer_tasks".to_string(),
    })?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 16,
"total_crates": null
} |
fn_clm_scheduler_acquire_pt_lock_-1943856801412940284 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/queue
// Implementation of MockDb for QueueInterface
/// `MockDb` stand-in for acquiring the process-tracker lock.
///
/// Not implemented for the mock; always reports the lock as NOT acquired
/// (`Ok(false)`) rather than erroring.
async fn acquire_pt_lock(
    &self,
    _tag: &str,
    _lock_key: &str,
    _lock_val: &str,
    _ttl: i64,
) -> CustomResult<bool, RedisError> {
    // [#172]: Implement function for `MockDb`
    Ok(false)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 14,
"total_crates": null
} |
fn_clm_scheduler_finish_process_with_business_status_3032781318561663227 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/process_tracker
// Implementation of MockDb for ProcessTrackerInterface
/// `MockDb` stand-in for marking a process finished with a business status.
///
/// Not implemented for the mock; always fails with `MockDbError`.
async fn finish_process_with_business_status(
    &self,
    _this: storage::ProcessTracker,
    _business_status: &'static str,
) -> CustomResult<(), errors::StorageError> {
    // [#172]: Implement function for `MockDb`
    Err(errors::StorageError::MockDbError)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 116,
"total_crates": null
} |
fn_clm_scheduler_insert_process_3032781318561663227 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/process_tracker
// Implementation of MockDb for ProcessTrackerInterface
/// `MockDb` implementation: materializes a `ProcessTracker` from the insert
/// payload, appends it to the in-memory list, and returns the stored record.
async fn insert_process(
    &self,
    new: storage::ProcessTrackerNew,
) -> CustomResult<storage::ProcessTracker, errors::StorageError> {
    // Build the tracker record from the insert payload field-by-field.
    let process = storage::ProcessTracker {
        id: new.id,
        name: new.name,
        tag: new.tag,
        runner: new.runner,
        retry_count: new.retry_count,
        schedule_time: new.schedule_time,
        rule: new.rule,
        tracking_data: new.tracking_data,
        business_status: new.business_status,
        status: new.status,
        event: new.event,
        created_at: new.created_at,
        updated_at: new.updated_at,
        version: new.version,
    };
    // Hold the lock only for the push itself.
    self.processes.lock().await.push(process.clone());
    Ok(process)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 68,
"total_crates": null
} |
fn_clm_scheduler_find_process_by_id_3032781318561663227 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/process_tracker
// Implementation of MockDb for ProcessTrackerInterface
/// `MockDb` implementation: linear scan of the in-memory process list for a
/// matching id, returning a clone of the record if found.
async fn find_process_by_id(
    &self,
    id: &str,
) -> CustomResult<Option<storage::ProcessTracker>, errors::StorageError> {
    let processes = self.processes.lock().await;
    let found = processes.iter().find(|process| process.id == id).cloned();
    Ok(found)
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 58,
"total_crates": null
} |
fn_clm_scheduler_update_process_3032781318561663227 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/process_tracker
// Implementation of MockDb for ProcessTrackerInterface
/// `MockDb` stand-in for applying a `ProcessTrackerUpdate` to a process.
///
/// Not implemented for the mock; always fails with `MockDbError`.
async fn update_process(
    &self,
    _this: storage::ProcessTracker,
    _process: storage::ProcessTrackerUpdate,
) -> CustomResult<storage::ProcessTracker, errors::StorageError> {
    // [#172]: Implement function for `MockDb`
    Err(errors::StorageError::MockDbError)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 44,
"total_crates": null
} |
fn_clm_scheduler_retry_process_3032781318561663227 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/db/process_tracker
// Implementation of MockDb for ProcessTrackerInterface
/// `MockDb` stand-in for rescheduling a process for retry.
///
/// Not implemented for the mock; always fails with `MockDbError`.
async fn retry_process(
    &self,
    _this: storage::ProcessTracker,
    _schedule_time: PrimitiveDateTime,
) -> CustomResult<(), errors::StorageError> {
    // [#172]: Implement function for `MockDb`
    Err(errors::StorageError::MockDbError)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 38,
"total_crates": null
} |
fn_clm_scheduler_error_handler_-2544748533679144568 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/workflows
/// Callback invoked after an error is returned from `execute_workflow`.
///
/// Default implementation fails with `NotImplemented`; concrete workflow
/// implementations are expected to override it.
async fn error_handler<'a>(
    &'a self,
    _state: &'a T,
    _process: storage::ProcessTracker,
    _error: errors::ProcessTrackerError,
) -> CustomResult<(), errors::ProcessTrackerError> {
    Err(errors::ProcessTrackerError::NotImplemented)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 6,
"total_crates": null
} |
fn_clm_scheduler_trigger_workflow_-2544748533679144568 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/workflows
/// Entry point that dispatches a picked-up process to its workflow.
///
/// Default implementation fails with `NotImplemented`; concrete workflow
/// selectors are expected to override it.
async fn trigger_workflow<'a>(
    &'a self,
    _state: &'a T,
    _process: storage::ProcessTracker,
) -> CustomResult<(), errors::ProcessTrackerError> {
    Err(errors::ProcessTrackerError::NotImplemented)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3,
"total_crates": null
} |
fn_clm_scheduler_execute_workflow_-2544748533679144568 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/workflows
/// The core execution of the workflow.
///
/// Default implementation fails with `NotImplemented`; concrete workflows
/// must override it with their actual task logic.
async fn execute_workflow<'a>(
    &'a self,
    _state: &'a T,
    _process: storage::ProcessTracker,
) -> Result<(), errors::ProcessTrackerError> {
    Err(errors::ProcessTrackerError::NotImplemented)?
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3,
"total_crates": null
} |
fn_clm_scheduler_success_handler_-2544748533679144568 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/workflows
/// Callback invoked after successful execution of `execute_workflow`.
///
/// Default implementation is a no-op; override to add post-success behavior.
async fn success_handler<'a>(&'a self, _state: &'a T, _process: storage::ProcessTracker) {}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 3,
"total_crates": null
} |
fn_clm_scheduler_from_redis_stream_entry_1871656966899016325 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/types/batch
// Inherent implementation for ProcessTrackerBatch
/// Reconstructs a `ProcessTrackerBatch` from the field map of a Redis stream
/// entry (the inverse of `to_redis_field_value_pairs`).
///
/// Every field is required; a missing field yields `MissingRequiredField`.
/// `created_time` is parsed from a unix-timestamp string and `trackers` from
/// a JSON array; parse failures yield `DeserializationFailed`.
pub fn from_redis_stream_entry(
    entry: HashMap<String, Option<String>>,
) -> CustomResult<Self, errors::ProcessTrackerError> {
    // Fields are `remove`d (not `get`) so each value is taken by ownership.
    let mut entry = entry;
    let id = entry
        .remove("id")
        .flatten()
        .get_required_value("id")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    let group_name = entry
        .remove("group_name")
        .flatten()
        .get_required_value("group_name")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    let stream_name = entry
        .remove("stream_name")
        .flatten()
        .get_required_value("stream_name")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    let connection_name = entry
        .remove("connection_name")
        .flatten()
        .get_required_value("connection_name")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    let created_time = entry
        .remove("created_time")
        .flatten()
        .get_required_value("created_time")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    // TODO: surface this as a dedicated parser-error variant instead of
    // `MissingRequiredField` for the timestamp conversion below.
    let created_time = {
        let offset_date_time = time::OffsetDateTime::from_unix_timestamp(
            created_time
                .as_str()
                .parse::<i64>()
                .change_context(errors::ParsingError::UnknownError)
                .change_context(errors::ProcessTrackerError::DeserializationFailed)?,
        )
        .attach_printable_lazy(|| format!("Unable to parse time {}", &created_time))
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
        PrimitiveDateTime::new(offset_date_time.date(), offset_date_time.time())
    };
    let rule = entry
        .remove("rule")
        .flatten()
        .get_required_value("rule")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    let trackers = entry
        .remove("trackers")
        .flatten()
        .get_required_value("trackers")
        .change_context(errors::ProcessTrackerError::MissingRequiredField)?;
    // The tracker list is stored as a JSON-encoded array of ProcessTracker.
    let trackers = serde_json::from_str::<Vec<ProcessTracker>>(trackers.as_str())
        .change_context(errors::ParsingError::UnknownError)
        .attach_printable_lazy(|| {
            format!("Unable to parse trackers from JSON string: {trackers:?}")
        })
        .change_context(errors::ProcessTrackerError::DeserializationFailed)
        .attach_printable("Error parsing ProcessTracker from redis stream entry")?;
    Ok(Self {
        id,
        group_name,
        stream_name,
        connection_name,
        created_time,
        rule,
        trackers,
    })
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 105,
"total_crates": null
} |
fn_clm_scheduler_to_redis_field_value_pairs_1871656966899016325 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/types/batch
// Inherent implementation for ProcessTrackerBatch
/// Serializes this batch into the `(field, value)` pairs stored in a Redis
/// stream entry (the inverse of `from_redis_stream_entry`).
///
/// `created_time` is encoded as a unix-timestamp string and `trackers` as a
/// JSON array; serialization failure yields `SerializationFailed`.
pub fn to_redis_field_value_pairs(
    &self,
) -> CustomResult<Vec<(&str, String)>, errors::ProcessTrackerError> {
    // Serialize the tracker list first so the only fallible step happens
    // before the pair list is assembled.
    let trackers = serde_json::to_string(&self.trackers)
        .change_context(errors::ProcessTrackerError::SerializationFailed)
        .attach_printable_lazy(|| {
            format!("Unable to stringify trackers: {:?}", self.trackers)
        })?;
    let created_time = self.created_time.assume_utc().unix_timestamp().to_string();
    Ok(vec![
        ("id", self.id.to_string()),
        ("group_name", self.group_name.to_string()),
        ("stream_name", self.stream_name.to_string()),
        ("connection_name", self.connection_name.to_string()),
        ("created_time", created_time),
        ("rule", self.rule.to_string()),
        ("trackers", trackers),
    ])
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 21,
"total_crates": null
} |
fn_clm_scheduler_default_-4655560208043094689 | clm | function | // Repository: hyperswitch
// Crate: scheduler
// Purpose: Background task scheduling and execution
// Module: crates/scheduler/src/consumer/types/process_data
// Implementation of RevenueRecoveryPaymentProcessTrackerMapping for Default
/// Default retry schedule for revenue-recovery payments: 15 attempts total,
/// starting 1 minute after creation and backing off from 3-hour up to
/// 18-hour intervals. No merchant-specific overrides by default.
fn default() -> Self {
    Self {
        default_mapping: RetryMapping {
            // 1st attempt happens after 1 minute of it being created
            start_after: 60,
            // Each (interval_seconds, repeat_count) pair schedules the next
            // `repeat_count` attempts at `interval_seconds` apart.
            frequencies: vec![
                // 2nd and 3rd attempts happen at intervals of 3 hours each
                (60 * 60 * 3, 2),
                // 4th, 5th, 6th attempts happen at intervals of 6 hours each
                (60 * 60 * 6, 3),
                // 7th, 8th, 9th attempts happen at intervals of 9 hours each
                (60 * 60 * 9, 3),
                // 10th, 11th and 12th attempts happen at intervals of 12 hours each
                (60 * 60 * 12, 3),
                // 13th, 14th and 15th attempts happen at intervals of 18 hours each
                (60 * 60 * 18, 3),
            ],
        },
        custom_merchant_mapping: HashMap::new(),
    }
}
| {
"crate": "scheduler",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 7705,
"total_crates": null
} |
fn_clm_smithy-generator_generate_model_registry_5837654474021952650 | clm | function | // Repository: hyperswitch
// Crate: smithy-generator
// Module: crates/smithy-generator/build
fn generate_model_registry(models: &[SmithyModelInfo]) -> Result<(), Box<dyn std::error::Error>> {
let out_dir = std::env::var("OUT_DIR").map_err(|_| "OUT_DIR environment variable not set")?;
let registry_path = Path::new(&out_dir).join("model_registry.rs");
let mut content = String::new();
content.push_str("// Auto-generated model registry\n");
content.push_str("// DO NOT EDIT - This file is generated by build.rs\n\n");
if !models.is_empty() {
content.push_str("use smithy_core::{SmithyModel, SmithyModelGenerator};\n\n");
// Generate imports
for model in models {
content.push_str(&format!(
"use {}::{};\n",
model.module_path, model.struct_name
));
}
content.push_str("\npub fn discover_smithy_models() -> Vec<SmithyModel> {\n");
content.push_str(" let mut models = Vec::new();\n\n");
// Generate model collection calls
for model in models {
content.push_str(&format!(
" models.push({}::generate_smithy_model());\n",
model.struct_name
));
}
content.push_str("\n models\n");
content.push_str("}\n");
} else {
// Generate empty function if no models found
content.push_str("use smithy_core::SmithyModel;\n\n");
content.push_str("pub fn discover_smithy_models() -> Vec<SmithyModel> {\n");
content.push_str(
" router_env::logger::info!(\"No SmithyModel structs found in workspace\");\n",
);
content.push_str(" Vec::new()\n");
content.push_str("}\n");
}
fs::write(®istry_path, content).map_err(|e| {
format!(
"Failed to write model registry to {}: {}",
registry_path.display(),
e
)
})?;
Ok(())
}
| {
"crate": "smithy-generator",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 47,
"total_crates": null
} |
fn_clm_smithy-generator_scan_directory_5837654474021952650 | clm | function | // Repository: hyperswitch
// Crate: smithy-generator
// Module: crates/smithy-generator/build
/// Recursively walks `dir`, appending discovered `SmithyModel` items to
/// `models`. Directory names extend the module path with `::` segments;
/// `.rs` files are handed to `scan_rust_file`.
///
/// Unreadable directories and per-file scan failures are reported as cargo
/// warnings and skipped; the walk itself is best-effort.
fn scan_directory(
    dir: &Path,
    crate_name: &str,
    module_path: &str,
    models: &mut Vec<SmithyModelInfo>,
) -> Result<(), Box<dyn std::error::Error>> {
    // An unreadable directory is silently skipped, matching a best-effort scan.
    let Ok(entries) = fs::read_dir(dir) else {
        return Ok(());
    };
    for entry in entries.flatten() {
        let path = entry.path();
        if path.is_dir() {
            let Some(raw_name) = path.file_name() else {
                println!(
                    "cargo:warning=Skipping directory with invalid name: {}",
                    path.display()
                );
                continue;
            };
            let dir_name = raw_name.to_string_lossy();
            // Root-level directories start the module path; deeper ones extend it.
            let child_module_path = if module_path.is_empty() {
                dir_name.to_string()
            } else {
                format!("{}::{}", module_path, dir_name)
            };
            scan_directory(&path, crate_name, &child_module_path, models)?;
        } else if path.extension().map(|ext| ext == "rs").unwrap_or(false) {
            if let Err(e) = scan_rust_file(&path, crate_name, module_path, models) {
                println!(
                    "cargo:warning=Failed to scan Rust file {}: {}",
                    path.display(),
                    e
                );
            }
        }
    }
    Ok(())
}
| {
"crate": "smithy-generator",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 32,
"total_crates": null
} |
fn_clm_smithy-generator_run_build_5837654474021952650 | clm | function | // Repository: hyperswitch
// Crate: smithy-generator
// Module: crates/smithy-generator/build
/// Build-script entry point: scans every crate under `<workspace>/crates` for
/// `SmithyModel` derives (skipping the smithy crates themselves to avoid a
/// self-dependency) and writes the generated model registry.
///
/// Per-crate scan failures are reported as cargo warnings and do not abort
/// the build; only workspace-root resolution and registry generation are fatal.
fn run_build() -> Result<(), Box<dyn std::error::Error>> {
    let workspace_root = get_workspace_root()?;
    let mut smithy_models = Vec::new();
    let crates_dir = workspace_root.join("crates");
    if let Ok(entries) = fs::read_dir(&crates_dir) {
        for entry in entries.flatten() {
            // Only directories can be crates; skip anything else.
            if !entry.file_type().map(|ft| ft.is_dir()).unwrap_or(false) {
                continue;
            }
            let crate_path = entry.path();
            let Some(raw_name) = crate_path.file_name() else {
                println!(
                    "cargo:warning=Skipping crate with invalid path: {}",
                    crate_path.display()
                );
                continue;
            };
            let crate_name = raw_name.to_string_lossy();
            // Skip the smithy crates themselves to avoid self-dependency.
            if matches!(
                crate_name.as_ref(),
                "smithy" | "smithy-core" | "smithy-generator"
            ) {
                continue;
            }
            if let Err(e) =
                scan_crate_for_smithy_models(&crate_path, &crate_name, &mut smithy_models)
            {
                println!("cargo:warning=Failed to scan crate {}: {}", crate_name, e);
            }
        }
    }
    // Generate the registry file (also handles the empty-model case).
    generate_model_registry(&smithy_models)?;
    Ok(())
}
| {
"crate": "smithy-generator",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 31,
"total_crates": null
} |
fn_clm_smithy-generator_scan_rust_file_5837654474021952650 | clm | function | // Repository: hyperswitch
// Crate: smithy-generator
// Module: crates/smithy-generator/build
/// Scans one Rust source file for `#[derive(... SmithyModel ...)]` structs or
/// enums and records each match (name + module path) in `models`.
///
/// An unreadable file is skipped silently; a regex compilation failure or a
/// module-path resolution failure is propagated as an error.
fn scan_rust_file(
    file_path: &Path,
    crate_name: &str,
    module_path: &str,
    models: &mut Vec<SmithyModelInfo>,
) -> Result<(), Box<dyn std::error::Error>> {
    let Ok(content) = fs::read_to_string(file_path) else {
        // Unreadable files are skipped; the scan is best-effort.
        return Ok(());
    };
    // Matches `#[derive(...)]` followed by any mix of attributes and
    // comments, then a (possibly `pub`) struct/enum declaration; captures the
    // derive list (1) and the item name (2).
    let re = Regex::new(r"(?ms)^#\[derive\(([^)]*(?:\([^)]*\))*[^)]*)\)\]\s*(?:(?:#\[[^\]]*\]\s*)|(?://[^\r\n]*\s*)|(?:///[^\r\n]*\s*)|(?:/\*.*?\*/\s*))*(?:pub\s+)?(?:struct|enum)\s+([A-Z][A-Za-z0-9_]*)\s*[<\{\(]")
        .map_err(|e| format!("Failed to compile regex: {}", e))?;
    for captures in re.captures_iter(&content) {
        let Some(derive_capture) = captures.get(1) else {
            println!(
                "cargo:warning=Missing derive content in regex capture for {}",
                file_path.display()
            );
            continue;
        };
        let Some(name_capture) = captures.get(2) else {
            println!(
                "cargo:warning=Missing item name in regex capture for {}",
                file_path.display()
            );
            continue;
        };
        let derive_content = derive_capture.as_str();
        let item_name = name_capture.as_str();
        // Only items whose derive list mentions SmithyModel are of interest.
        if !derive_content.contains("SmithyModel") {
            continue;
        }
        if !is_valid_rust_identifier(item_name) {
            println!(
                "cargo:warning=Skipping invalid identifier: {} in {}",
                item_name,
                file_path.display()
            );
            continue;
        }
        let full_module_path = create_module_path(file_path, crate_name, module_path)?;
        models.push(SmithyModelInfo {
            struct_name: item_name.to_string(),
            module_path: full_module_path,
        });
    }
    Ok(())
}
| {
"crate": "smithy-generator",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 29,
"total_crates": null
} |
fn_clm_smithy-generator_get_workspace_root_5837654474021952650 | clm | function | // Repository: hyperswitch
// Crate: smithy-generator
// Module: crates/smithy-generator/build
/// Resolves the workspace root from `CARGO_MANIFEST_DIR`, which for this
/// crate lives at `<workspace>/crates/smithy-generator` — two levels below
/// the root.
///
/// # Errors
/// Fails when the env var is unset or the path is too shallow to climb.
fn get_workspace_root() -> Result<std::path::PathBuf, Box<dyn std::error::Error>> {
    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR")
        .map_err(|_| "CARGO_MANIFEST_DIR environment variable not set")?;
    // Climb crates/<crate> -> crates -> workspace root.
    let crates_dir = Path::new(&manifest_dir)
        .parent()
        .ok_or("Cannot get parent directory of CARGO_MANIFEST_DIR")?;
    let workspace_root = crates_dir
        .parent()
        .ok_or("Cannot get workspace root directory")?;
    Ok(workspace_root.to_path_buf())
}
| {
"crate": "smithy-generator",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 19,
"total_crates": null
} |
fn_clm_smithy-generator_main_-2201687459701091079 | clm | function | // Repository: hyperswitch
// Crate: smithy-generator
// Module: crates/smithy-generator/src/main
/// CLI entry point: discovers all `SmithyModel` types collected at build time,
/// feeds them to the generator, and writes Smithy IDL files to
/// `smithy/models` (relative to the current working directory).
///
/// Exits successfully (with guidance logged) when no models are found.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut generator = SmithyGenerator::new();
    logger::info!("Discovering Smithy models from workspace...");
    // Automatically discover and add all models (registry generated by build.rs).
    let models = discover_smithy_models();
    logger::info!("Found {} Smithy models", models.len());
    if models.is_empty() {
        logger::info!("No SmithyModel structs found. Make sure your structs:");
        logger::info!("  1. Derive SmithyModel: #[derive(SmithyModel)]");
        logger::info!("  2. Are in a crate that smithy can access");
        logger::info!("  3. Have the correct smithy attributes");
        return Ok(());
    }
    for model in models {
        logger::info!("  Processing namespace: {}", model.namespace);
        let shape_names: Vec<_> = model.shapes.keys().collect();
        logger::info!("    Shapes: {:?}", shape_names);
        generator.add_model(model);
    }
    logger::info!("Generating Smithy IDL files...");
    // Generate IDL files
    let output_dir = Path::new("smithy/models");
    let absolute_output_dir = std::env::current_dir()?.join(output_dir);
    logger::info!("Output directory: {}", absolute_output_dir.display());
    generator.generate_idl(output_dir)?;
    logger::info!("✅ Smithy models generated successfully!");
    logger::info!("Files written to: {}", absolute_output_dir.display());
    // List generated files
    if let Ok(entries) = std::fs::read_dir(output_dir) {
        logger::info!("Generated files:");
        for entry in entries.flatten() {
            if entry.file_type().map(|ft| ft.is_file()).unwrap_or(false) {
                logger::info!("  - {}", entry.file_name().to_string_lossy());
            }
        }
    }
    Ok(())
}
| {
"crate": "smithy-generator",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 34,
"total_crates": null
} |
fn_clm_openapi_modify_-8007642840924140990 | clm | function | // Repository: hyperswitch
// Crate: openapi
// Module: crates/openapi/src/openapi
// Implementation of SecurityAddon for utoipa::Modify
/// Registers all HyperSwitch security schemes on the generated OpenAPI spec:
/// four `api-key` header variants (merchant, admin, publishable, ephemeral)
/// plus a JWT bearer scheme.
///
/// Does nothing if the spec has no components section.
fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
    use utoipa::openapi::security::{
        ApiKey, ApiKeyValue, HttpAuthScheme, HttpBuilder, SecurityScheme,
    };
    if let Some(components) = openapi.components.as_mut() {
        components.add_security_schemes_from_iter([
            (
                "api_key",
                SecurityScheme::ApiKey(ApiKey::Header(ApiKeyValue::with_description(
                    "api-key",
                    "Use the API key created under your merchant account from the HyperSwitch dashboard. API key is used to authenticate API requests from your merchant server only. Don't expose this key on a website or embed it in a mobile application."
                ))),
            ),
            (
                "admin_api_key",
                SecurityScheme::ApiKey(ApiKey::Header(ApiKeyValue::with_description(
                    "api-key",
                    "Admin API keys allow you to perform some privileged actions such as \
                    creating a merchant account and Merchant Connector account."
                ))),
            ),
            (
                "publishable_key",
                SecurityScheme::ApiKey(ApiKey::Header(ApiKeyValue::with_description(
                    "api-key",
                    "Publishable keys are a type of keys that can be public and have limited \
                    scope of usage."
                ))),
            ),
            (
                "ephemeral_key",
                SecurityScheme::ApiKey(ApiKey::Header(ApiKeyValue::with_description(
                    "api-key",
                    "Ephemeral keys provide temporary access to singular data, such as access \
                    to a single customer object for a short period of time."
                ))),
            ),
            // JWT bearer auth (dashboard/user flows).
            (
                "jwt_key",
                SecurityScheme::Http(HttpBuilder::new().scheme(HttpAuthScheme::Bearer).bearer_format("JWT").build())
            )
        ]);
    }
}
| {
"crate": "openapi",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 46,
"total_crates": null
} |
fn_clm_openapi_main_8900521235053656572 | clm | function | // Repository: hyperswitch
// Crate: openapi
// Module: crates/openapi/src/main
fn main() {
// Generates the OpenAPI specification JSON for either the v1 or v2 API
// (selected by cargo feature) and writes it under `api-reference/` at the
// workspace root. With neither feature enabled it only prints a hint.
#[cfg(all(feature = "v1", feature = "v2"))]
compile_error!("features v1 and v2 are mutually exclusive, please enable only one of them");
// Output path relative to the workspace root, per API version.
#[cfg(feature = "v1")]
let relative_file_path = "api-reference/v1/openapi_spec_v1.json";
#[cfg(feature = "v2")]
let relative_file_path = "api-reference/v2/openapi_spec_v2.json";
#[cfg(any(feature = "v1", feature = "v2"))]
let mut file_path = router_env::workspace_path();
#[cfg(any(feature = "v1", feature = "v2"))]
file_path.push(relative_file_path);
// Build the in-memory OpenAPI document from the version-specific ApiDoc type.
#[cfg(feature = "v1")]
let openapi = <openapi::ApiDoc as utoipa::OpenApi>::openapi();
#[cfg(feature = "v2")]
let openapi = <openapi_v2::ApiDoc as utoipa::OpenApi>::openapi();
// Serialize to pretty-printed JSON and write it out; both steps abort on failure.
#[allow(clippy::expect_used)]
#[cfg(any(feature = "v1", feature = "v2"))]
std::fs::write(
&file_path,
openapi
.to_pretty_json()
.expect("Failed to serialize OpenAPI specification as JSON"),
)
.expect("Failed to write OpenAPI specification to file");
#[allow(clippy::expect_used)]
#[cfg(feature = "v1")]
{
// TODO: Do this using utoipa::extensions after we have upgraded to 5.x
// Post-process the freshly written v1 spec to splice in an `x-mcp`
// extension object, which the current utoipa version cannot emit directly.
let file_content =
std::fs::read_to_string(&file_path).expect("Failed to read OpenAPI specification file");
let mut lines: Vec<&str> = file_content.lines().collect();
// Insert the new text at line 3 (index 2)
// NOTE(review): this assumes the pretty-printed JSON's first two lines are
// the opening brace and first key — confirm if the serializer changes.
if lines.len() > 2 {
let new_line = " \"x-mcp\": {\n \"enabled\": true\n },";
lines.insert(2, new_line);
}
let modified_content = lines.join("\n");
std::fs::write(&file_path, modified_content)
.expect("Failed to write modified OpenAPI specification to file");
}
#[cfg(any(feature = "v1", feature = "v2"))]
println!("Successfully saved OpenAPI specification file at '{relative_file_path}'");
#[cfg(not(any(feature = "v1", feature = "v2")))]
println!("No feature enabled to generate OpenAPI specification, please enable either 'v1' or 'v2' feature");
}
| {
"crate": "openapi",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 26,
"total_crates": null
} |
fn_clm_openapi_modify_-2835864390694535495 | clm | function | // Repository: hyperswitch
// Crate: openapi
// Module: crates/openapi/src/openapi_v2
// Implementation of SecurityAddon for utoipa::Modify
fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
    use utoipa::openapi::security::{ApiKey, ApiKeyValue, SecurityScheme};
    // A spec with no components section has nowhere to register security schemes.
    let Some(components) = openapi.components.as_mut() else {
        return;
    };
    // Every scheme here reads the same `api-key` header; only the
    // human-readable description differs.
    let api_key_scheme = |description: &str| {
        SecurityScheme::ApiKey(ApiKey::Header(ApiKeyValue::with_description(
            "api-key",
            description,
        )))
    };
    components.add_security_schemes_from_iter([
        (
            "api_key",
            api_key_scheme("Use the API key created under your merchant account from the HyperSwitch dashboard. API key is used to authenticate API requests from your merchant server only. Don't expose this key on a website or embed it in a mobile application."),
        ),
        (
            "admin_api_key",
            api_key_scheme("Admin API keys allow you to perform some privileged actions such as creating a merchant account and Connector account."),
        ),
        (
            "publishable_key",
            api_key_scheme("Publishable keys are a type of keys that can be public and have limited scope of usage."),
        ),
        (
            "ephemeral_key",
            api_key_scheme("Ephemeral keys provide temporary access to singular data, such as access to a single customer object for a short period of time."),
        ),
    ]);
}
| {
"crate": "openapi",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": false,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 36,
"total_crates": null
} |
fn_clm_openapi_create_token_vault_api_-2303296025807842275 | clm | function | // Repository: hyperswitch
// Crate: openapi
// Module: crates/openapi/src/routes/tokenization
// Empty stub for the token-vault creation route: it performs no work here and
// presumably exists only so the openapi crate can describe the endpoint — the
// real handler would live elsewhere (not visible in this file).
pub async fn create_token_vault_api() {}
| {
"crate": "openapi",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 10,
"total_crates": null
} |
fn_clm_openapi_delete_tokenized_data_api_-2303296025807842275 | clm | function | // Repository: hyperswitch
// Crate: openapi
// Module: crates/openapi/src/routes/tokenization
// Empty stub for the tokenized-data deletion route: it performs no work here
// and presumably exists only so the openapi crate can describe the endpoint —
// the real handler would live elsewhere (not visible in this file).
pub async fn delete_tokenized_data_api() {}
| {
"crate": "openapi",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 10,
"total_crates": null
} |
fn_clm_openapi_proxy_core_-7160307250460333498 | clm | function | // Repository: hyperswitch
// Crate: openapi
// Module: crates/openapi/src/routes/proxy
// Empty stub for the proxy route: it performs no work here and presumably
// exists only so the openapi crate can describe the endpoint — the real
// handler would live elsewhere (not visible in this file).
pub async fn proxy_core() {}
| {
"crate": "openapi",
"file": null,
"file_size": null,
"is_async": false,
"is_pub": true,
"num_enums": null,
"num_structs": null,
"num_tables": null,
"score": 13,
"total_crates": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.