repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/connection_id.rs | src/udp/structs/connection_id.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct ConnectionId(pub i64); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/peer_key.rs | src/udp/structs/peer_key.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct PeerKey(pub u32); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/number_of_peers.rs | src/udp/structs/number_of_peers.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfPeers(pub i32); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/error_response.rs | src/udp/structs/error_response.rs | use std::borrow::Cow;
use crate::udp::structs::transaction_id::TransactionId;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ErrorResponse {
pub transaction_id: TransactionId,
pub message: Cow<'static, str>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/scrape_request.rs | src/udp/structs/scrape_request.rs | use crate::tracker::structs::info_hash::InfoHash;
use crate::udp::structs::connection_id::ConnectionId;
use crate::udp::structs::transaction_id::TransactionId;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeRequest {
pub connection_id: ConnectionId,
pub transaction_id: TransactionId,
pub info_hashes: Vec<InfoHash>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/response_peer.rs | src/udp/structs/response_peer.rs | use crate::udp::structs::port::Port;
use crate::udp::traits::Ip;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ResponsePeer<I: Ip> {
pub ip_address: I,
pub port: Port,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/transaction_id.rs | src/udp/structs/transaction_id.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct TransactionId(pub i32); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/connect_response.rs | src/udp/structs/connect_response.rs | use crate::udp::structs::connection_id::ConnectionId;
use crate::udp::structs::transaction_id::TransactionId;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectResponse {
pub connection_id: ConnectionId,
pub transaction_id: TransactionId,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/udp_server.rs | src/udp/structs/udp_server.rs | use std::sync::Arc;
use tokio::net::UdpSocket;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
#[derive(Debug)]
pub struct UdpServer {
pub(crate) socket: Arc<UdpSocket>,
pub(crate) udp_threads: usize,
pub(crate) worker_threads: usize,
pub(crate) tracker: Arc<TorrentTracker>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/port.rs | src/udp/structs/port.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct Port(pub u16); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/connect_request.rs | src/udp/structs/connect_request.rs | use crate::udp::structs::transaction_id::TransactionId;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectRequest {
pub transaction_id: TransactionId,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/number_of_downloads.rs | src/udp/structs/number_of_downloads.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfDownloads(pub i32); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/announce_interval.rs | src/udp/structs/announce_interval.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct AnnounceInterval(pub i32); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/structs/announce_response.rs | src/udp/structs/announce_response.rs | use crate::udp::structs::announce_interval::AnnounceInterval;
use crate::udp::structs::number_of_peers::NumberOfPeers;
use crate::udp::structs::response_peer::ResponsePeer;
use crate::udp::structs::transaction_id::TransactionId;
use crate::udp::traits::Ip;
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceResponse<I: Ip> {
pub transaction_id: TransactionId,
pub announce_interval: AnnounceInterval,
pub leechers: NumberOfPeers,
pub seeders: NumberOfPeers,
pub peers: Vec<ResponsePeer<I>>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/enums/response.rs | src/udp/enums/response.rs | use std::net::{Ipv4Addr, Ipv6Addr};
use crate::udp::structs::announce_response::AnnounceResponse;
use crate::udp::structs::connect_response::ConnectResponse;
use crate::udp::structs::error_response::ErrorResponse;
use crate::udp::structs::scrape_response::ScrapeResponse;
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Response {
Connect(ConnectResponse),
AnnounceIpv4(AnnounceResponse<Ipv4Addr>),
AnnounceIpv6(AnnounceResponse<Ipv6Addr>),
Scrape(ScrapeResponse),
Error(ErrorResponse),
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/enums/request.rs | src/udp/enums/request.rs | use crate::udp::structs::announce_request::AnnounceRequest;
use crate::udp::structs::connect_request::ConnectRequest;
use crate::udp::structs::scrape_request::ScrapeRequest;
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Request {
Connect(ConnectRequest),
Announce(AnnounceRequest),
Scrape(ScrapeRequest),
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/enums/server_error.rs | src/udp/enums/server_error.rs | use thiserror::Error;
#[derive(Error, Debug)]
pub enum ServerError {
#[error("internal server error")]
InternalServerError,
#[error("info_hash is either missing or invalid")]
InvalidInfoHash,
#[error("info_hash unknown")]
UnknownInfoHash,
#[error("could not find remote address")]
AddressNotFound,
#[error("torrent has no peers")]
NoPeersFound,
#[error("torrent not on whitelist")]
TorrentNotWhitelisted,
#[error("torrent blacklist")]
TorrentBlacklisted,
#[error("unknown key")]
UnknownKey,
#[error("peer not authenticated")]
PeerNotAuthenticated,
#[error("invalid authentication key")]
PeerKeyNotValid,
#[error("exceeded info_hash limit")]
ExceededInfoHashLimit,
#[error("bad request")]
BadRequest,
#[error("maintenance mode enabled, please try again later")]
MaintenanceMode,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/udp/enums/request_parse_error.rs | src/udp/enums/request_parse_error.rs | use std::borrow::Cow;
use crate::udp::structs::connection_id::ConnectionId;
use crate::udp::structs::transaction_id::TransactionId;
#[derive(Debug)]
pub enum RequestParseError {
Sendable {
connection_id: ConnectionId,
transaction_id: TransactionId,
err: Cow<'static, str>,
},
Unsendable {
err: Cow<'static, str>,
},
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/tests.rs | src/tracker/tests.rs | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false | |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs.rs | src/tracker/structs.rs | pub mod torrent_tracker;
pub mod announce_query_request;
pub mod info_hash;
pub mod peer_id;
pub mod scrape_query_request;
pub mod torrent_entry;
pub mod torrent_peer;
pub mod user_entry_item;
pub mod user_id;
pub mod torrent_peers;
pub mod torrent_sharding;
pub mod cleanup_stats_atomic;
pub mod padded_atomic_u64; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/enums.rs | src/tracker/enums.rs | pub mod announce_event;
pub mod announce_event_def;
pub mod torrent_peers_type;
pub mod updates_action; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/types.rs | src/tracker/types.rs | pub mod torrents_updates;
pub mod keys_updates;
pub mod users_updates; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/mod.rs | src/tracker/mod.rs | pub mod enums;
pub mod impls;
pub mod structs;
pub mod types;
pub mod tests; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls.rs | src/tracker/impls.rs | pub mod info_hash;
pub mod peer_id;
pub mod torrent_entry;
pub mod torrent_peer;
pub mod torrent_tracker;
pub mod torrent_tracker_keys;
pub mod torrent_tracker_peers;
pub mod torrent_tracker_torrents;
pub mod torrent_tracker_handlers;
pub mod torrent_tracker_torrents_blacklist;
pub mod torrent_tracker_torrents_updates;
pub mod torrent_tracker_torrents_whitelist;
pub mod torrent_tracker_users;
pub mod torrent_tracker_users_updates;
pub mod user_id;
pub mod announce_event;
pub mod torrent_sharding;
pub mod torrent_tracker_import;
pub mod torrent_tracker_export;
pub mod torrent_tracker_cert_gen;
pub mod torrent_tracker_torrents_blacklist_updates;
pub mod torrent_tracker_torrents_whitelist_updates;
pub mod torrent_tracker_keys_updates;
pub mod cleanup_stats_atomics; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker.rs | src/tracker/impls/torrent_tracker.rs | use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicI64};
use chrono::Utc;
use parking_lot::RwLock;
use crate::config::structs::configuration::Configuration;
use crate::database::structs::database_connector::DatabaseConnector;
use crate::stats::structs::stats_atomics::StatsAtomics;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub async fn new(config: Arc<Configuration>, create_database: bool) -> TorrentTracker
{
TorrentTracker {
config: config.clone(),
torrents_sharding: Arc::new(Default::default()),
torrents_updates: Arc::new(RwLock::new(HashMap::new())),
torrents_whitelist: Arc::new(RwLock::new(Vec::new())),
torrents_whitelist_updates: Arc::new(RwLock::new(HashMap::new())),
torrents_blacklist: Arc::new(RwLock::new(Vec::new())),
torrents_blacklist_updates: Arc::new(RwLock::new(HashMap::new())),
keys: Arc::new(RwLock::new(BTreeMap::new())),
keys_updates: Arc::new(RwLock::new(HashMap::new())),
stats: Arc::new(StatsAtomics {
started: AtomicI64::new(Utc::now().timestamp()),
timestamp_run_save: AtomicI64::new(0),
timestamp_run_timeout: AtomicI64::new(0),
timestamp_run_console: AtomicI64::new(0),
timestamp_run_keys_timeout: AtomicI64::new(0),
torrents: AtomicI64::new(0),
torrents_updates: AtomicI64::new(0),
users: AtomicI64::new(0),
users_updates: AtomicI64::new(0),
seeds: AtomicI64::new(0),
peers: AtomicI64::new(0),
completed: AtomicI64::new(0),
whitelist_enabled: AtomicBool::new(config.tracker_config.clone().whitelist_enabled),
whitelist: AtomicI64::new(0),
whitelist_updates: AtomicI64::new(0),
blacklist_enabled: AtomicBool::new(config.tracker_config.clone().blacklist_enabled),
blacklist: AtomicI64::new(0),
blacklist_updates: AtomicI64::new(0),
keys_enabled: AtomicBool::new(config.tracker_config.clone().keys_enabled),
keys: AtomicI64::new(0),
keys_updates: AtomicI64::new(0),
tcp4_connections_handled: AtomicI64::new(0),
tcp4_api_handled: AtomicI64::new(0),
tcp4_announces_handled: AtomicI64::new(0),
tcp4_scrapes_handled: AtomicI64::new(0),
tcp4_not_found: AtomicI64::new(0),
tcp4_failure: AtomicI64::new(0),
tcp6_connections_handled: AtomicI64::new(0),
tcp6_api_handled: AtomicI64::new(0),
tcp6_announces_handled: AtomicI64::new(0),
tcp6_scrapes_handled: AtomicI64::new(0),
tcp6_not_found: AtomicI64::new(0),
tcp6_failure: AtomicI64::new(0),
udp4_invalid_request: AtomicI64::new(0),
udp4_bad_request: AtomicI64::new(0),
udp4_connections_handled: AtomicI64::new(0),
udp4_announces_handled: AtomicI64::new(0),
udp4_scrapes_handled: AtomicI64::new(0),
udp6_invalid_request: AtomicI64::new(0),
udp6_bad_request: AtomicI64::new(0),
udp6_connections_handled: AtomicI64::new(0),
udp6_announces_handled: AtomicI64::new(0),
udp6_scrapes_handled: AtomicI64::new(0),
udp_queue_len: AtomicI64::new(0),
}),
users: Arc::new(RwLock::new(BTreeMap::new())),
users_updates: Arc::new(RwLock::new(HashMap::new())),
sqlx: DatabaseConnector::new(config.clone(), create_database).await,
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_users_updates.rs | src/tracker/impls/torrent_tracker_users_updates.rs | use std::collections::{BTreeMap, HashMap};
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::time::SystemTime;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
use crate::tracker::structs::user_entry_item::UserEntryItem;
use crate::tracker::structs::user_id::UserId;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub fn add_user_update(&self, user_id: UserId, user_entry_item: UserEntryItem, updates_action: UpdatesAction) -> (UserEntryItem, bool)
{
let mut lock = self.users_updates.write();
let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
if lock.insert(timestamp, (user_id, user_entry_item.clone(), updates_action)).is_none() {
self.update_stats(StatsEvent::UsersUpdates, 1);
(user_entry_item, true)
} else {
(user_entry_item, false)
}
}
#[tracing::instrument(level = "debug")]
pub fn get_user_updates(&self) -> HashMap<u128, (UserId, UserEntryItem, UpdatesAction)>
{
let lock = self.users_updates.read_recursive();
lock.clone()
}
#[tracing::instrument(level = "debug")]
pub fn remove_user_update(&self, timestamp: &u128) -> bool
{
let mut lock = self.users_updates.write();
if lock.remove(timestamp).is_some() {
self.update_stats(StatsEvent::UsersUpdates, -1);
true
} else {
false
}
}
#[tracing::instrument(level = "debug")]
pub fn clear_user_updates(&self)
{
let mut lock = self.users_updates.write();
lock.clear();
self.set_stats(StatsEvent::UsersUpdates, 0);
}
#[tracing::instrument(level = "debug")]
pub async fn save_user_updates(&self, torrent_tracker: Arc<TorrentTracker>) -> Result<(), ()>
{
let updates = {
let lock = self.users_updates.read_recursive();
lock.clone()
};
if updates.is_empty() {
return Ok(());
}
let mut mapping: HashMap<UserId, (u128, UserEntryItem, UpdatesAction)> = HashMap::with_capacity(updates.len());
let mut timestamps_to_remove = Vec::new();
for (timestamp, (user_id, user_entry_item, updates_action)) in updates {
match mapping.entry(user_id) {
Entry::Occupied(mut o) => {
let existing = o.get();
if timestamp > existing.0 {
timestamps_to_remove.push(existing.0);
o.insert((timestamp, user_entry_item, updates_action));
} else {
timestamps_to_remove.push(timestamp);
}
}
Entry::Vacant(v) => {
v.insert((timestamp, user_entry_item, updates_action));
}
}
}
let mapping_len = mapping.len();
let users_to_save: BTreeMap<UserId, (UserEntryItem, UpdatesAction)> = mapping
.iter()
.map(|(user_id, (_, user_entry_item, updates_action))| (*user_id, (user_entry_item.clone(), *updates_action)))
.collect();
match self.save_users(torrent_tracker, users_to_save).await {
Ok(_) => {
info!("[SYNC USER UPDATES] Synced {mapping_len} users");
let mut lock = self.users_updates.write();
let mut removed_count = 0i64;
for (_, (timestamp, _, _)) in mapping {
if lock.remove(×tamp).is_some() {
removed_count += 1;
}
}
for timestamp in timestamps_to_remove {
if lock.remove(×tamp).is_some() {
removed_count += 1;
}
}
if removed_count > 0 {
self.update_stats(StatsEvent::UsersUpdates, -removed_count);
}
Ok(())
}
Err(_) => {
error!("[SYNC USER UPDATES] Unable to sync {mapping_len} users");
Err(())
}
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/user_id.rs | src/tracker/impls/user_id.rs | use std::fmt;
use std::fmt::Formatter;
use crate::common::common::bin2hex;
use crate::tracker::structs::user_id::UserId;
impl fmt::Display for UserId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
bin2hex(&self.0, f)
}
}
impl std::str::FromStr for UserId {
type Err = binascii::ConvertError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.len() != 40 {
return Err(binascii::ConvertError::InvalidInputLength);
}
let mut result = UserId([0u8; 20]);
let bytes = s.as_bytes();
for i in 0..20 {
let high = hex_to_nibble(bytes[i * 2]);
let low = hex_to_nibble(bytes[i * 2 + 1]);
if high == 0xFF || low == 0xFF {
return Err(binascii::ConvertError::InvalidInput);
}
result.0[i] = (high << 4) | low;
}
Ok(result)
}
}
impl From<&[u8]> for UserId {
fn from(data: &[u8]) -> UserId {
assert_eq!(data.len(), 20);
let mut ret = UserId([0u8; 20]);
ret.0.copy_from_slice(data);
ret
}
}
impl From<[u8; 20]> for UserId {
fn from(data: [u8; 20]) -> Self {
UserId(data)
}
}
impl serde::ser::Serialize for UserId {
fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
let mut buffer = [0u8; 40];
for (i, &byte) in self.0.iter().enumerate() {
buffer[i * 2] = HEX_CHARS[(byte >> 4) as usize];
buffer[i * 2 + 1] = HEX_CHARS[(byte & 0xf) as usize];
}
// SAFETY: We know the buffer contains only valid ASCII hex characters
let str_out = unsafe { std::str::from_utf8_unchecked(&buffer) };
serializer.serialize_str(str_out)
}
}
impl<'de> serde::de::Deserialize<'de> for UserId {
fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> {
struct UserIdVisitor;
impl<'de> serde::de::Visitor<'de> for UserIdVisitor {
type Value = UserId;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a 40 character hex string")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if v.len() != 40 {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(v),
&"expected a 40 character long string",
));
}
let mut result = UserId([0u8; 20]);
let bytes = v.as_bytes();
for i in 0..20 {
let high = hex_to_nibble(bytes[i * 2]);
let low = hex_to_nibble(bytes[i * 2 + 1]);
if high == 0xFF || low == 0xFF {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(v),
&"expected a hexadecimal string",
));
}
result.0[i] = (high << 4) | low;
}
Ok(result)
}
}
des.deserialize_str(UserIdVisitor)
}
}
#[inline(always)]
fn hex_to_nibble(c: u8) -> u8 {
match c {
b'0'..=b'9' => c - b'0',
b'a'..=b'f' => c - b'a' + 10,
b'A'..=b'F' => c - b'A' + 10,
_ => 0xFF,
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_import.rs | src/tracker/impls/torrent_tracker_import.rs | use std::collections::BTreeMap;
use std::fs;
use std::process::exit;
use std::sync::Arc;
use log::{error, info};
use serde_json::Value;
use crate::structs::Cli;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_entry::TorrentEntry;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
use crate::tracker::structs::user_entry_item::UserEntryItem;
use crate::tracker::structs::user_id::UserId;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub async fn import(&self, args: &Cli, tracker: Arc<TorrentTracker>)
{
info!("[IMPORT] Requesting to import data");
let config = &tracker.config.tracker_config;
let torrents_file = &args.import_file_torrents;
info!("[IMPORT] Importing torrents to memory {torrents_file}");
let data = fs::read(torrents_file)
.unwrap_or_else(|error| {
error!("[IMPORT] The torrents file {torrents_file} could not be imported!");
panic!("[IMPORT] {error}")
});
let torrents: Value = serde_json::from_slice(&data)
.expect("[IMPORT] Failed to parse torrents JSON");
let torrents_obj = torrents.as_object()
.expect("[IMPORT] Torrents data is not a JSON object");
for (key, value) in torrents_obj {
let completed = value["completed"].as_u64()
.expect("[IMPORT] 'completed' field doesn't exist or is missing!");
let hash_bytes = hex::decode(key)
.expect("[IMPORT] Torrent hash is not hex or invalid!");
let info_hash = InfoHash(hash_bytes[..20].try_into()
.expect("[IMPORT] Invalid hash length"));
tracker.add_torrent_update(info_hash, TorrentEntry {
seeds: Default::default(),
peers: Default::default(),
completed,
updated: std::time::Instant::now(),
}, UpdatesAction::Add);
}
tracker.save_torrent_updates(Arc::clone(&tracker)).await
.expect("[IMPORT] Unable to save torrents to the database!");
if config.whitelist_enabled {
let whitelists_file = &args.import_file_whitelists;
info!("[IMPORT] Importing whitelists to memory {whitelists_file}");
let data = fs::read(whitelists_file)
.unwrap_or_else(|error| {
error!("[IMPORT] The whitelists file {whitelists_file} could not be imported!");
panic!("[IMPORT] {error}")
});
let whitelists: Value = serde_json::from_slice(&data)
.expect("[IMPORT] Failed to parse whitelists JSON");
let whitelists_array = whitelists.as_array()
.expect("[IMPORT] Whitelists data is not a JSON array");
for value in whitelists_array {
let hash_str = value.as_str()
.expect("[IMPORT] Whitelist entry is not a string");
let hash_bytes = hex::decode(hash_str)
.expect("[IMPORT] Torrent hash is not hex or invalid!");
let info_hash = InfoHash(hash_bytes[..20].try_into()
.expect("[IMPORT] Invalid hash length"));
tracker.add_whitelist_update(info_hash, UpdatesAction::Add);
}
tracker.save_whitelist_updates(Arc::clone(&tracker)).await
.expect("[IMPORT] Unable to save whitelist to the database!");
}
if config.blacklist_enabled {
let blacklists_file = &args.import_file_blacklists;
info!("[IMPORT] Importing blacklists to memory {blacklists_file}");
let data = fs::read(blacklists_file)
.unwrap_or_else(|error| {
error!("[IMPORT] The blacklists file {blacklists_file} could not be imported!");
panic!("[IMPORT] {error}")
});
let blacklists: Value = serde_json::from_slice(&data)
.expect("[IMPORT] Failed to parse blacklists JSON");
let blacklists_array = blacklists.as_array()
.expect("[IMPORT] Blacklists data is not a JSON array");
for value in blacklists_array {
let hash_str = value.as_str()
.expect("[IMPORT] Blacklist entry is not a string");
let hash_bytes = hex::decode(hash_str)
.expect("[IMPORT] Torrent hash is not hex or invalid!");
let info_hash = InfoHash(hash_bytes[..20].try_into()
.expect("[IMPORT] Invalid hash length"));
tracker.add_blacklist_update(info_hash, UpdatesAction::Add);
}
tracker.save_blacklist_updates(Arc::clone(&tracker)).await
.expect("[IMPORT] Unable to save blacklist to the database!");
}
if config.keys_enabled {
let keys_file = &args.import_file_keys;
info!("[IMPORT] Importing keys to memory {keys_file}");
let data = fs::read(keys_file)
.unwrap_or_else(|error| {
error!("[IMPORT] The keys file {keys_file} could not be imported!");
panic!("[IMPORT] {error}")
});
let keys: Value = serde_json::from_slice(&data)
.expect("[IMPORT] Failed to parse keys JSON");
let keys_obj = keys.as_object()
.expect("[IMPORT] Keys data is not a JSON object");
for (key, value) in keys_obj {
let timeout = value.as_i64()
.expect("[IMPORT] timeout value doesn't exist or is missing!");
let hash_bytes = hex::decode(key)
.expect("[IMPORT] Key hash is not hex or invalid!");
let hash = InfoHash(hash_bytes[..20].try_into()
.expect("[IMPORT] Invalid hash length"));
tracker.add_key_update(hash, timeout, UpdatesAction::Add);
}
tracker.save_key_updates(Arc::clone(&tracker)).await
.expect("[IMPORT] Unable to save keys to the database!");
}
if config.users_enabled {
let users_file = &args.import_file_users;
info!("[IMPORT] Importing users to memory {users_file}");
let data = fs::read(users_file)
.unwrap_or_else(|error| {
error!("[IMPORT] The users file {users_file} could not be imported!");
panic!("[IMPORT] {error}")
});
let users: Value = serde_json::from_slice(&data)
.expect("[IMPORT] Failed to parse users JSON");
let users_obj = users.as_object()
.expect("[IMPORT] Users data is not a JSON object");
for (key, value) in users_obj {
let user_hash_bytes = hex::decode(key)
.expect("[IMPORT] User hash is not hex or invalid!");
let user_hash = UserId(user_hash_bytes[..20].try_into()
.expect("[IMPORT] Invalid hash length"));
let key_str = value["key"].as_str()
.expect("[IMPORT] Key field is missing or not a string");
let key_hash_bytes = hex::decode(key_str)
.expect("[IMPORT] Key hash is not hex or invalid!");
let key_hash = UserId(key_hash_bytes[..20].try_into()
.expect("[IMPORT] Invalid hash length"));
let user_entry = UserEntryItem {
key: key_hash,
user_id: value["user_id"].as_u64(),
user_uuid: value["user_uuid"].as_str().map(String::from),
uploaded: value["uploaded"].as_u64()
.expect("[IMPORT] 'uploaded' field doesn't exist or is missing!"),
downloaded: value["downloaded"].as_u64()
.expect("[IMPORT] 'downloaded' field doesn't exist or is missing!"),
completed: value["completed"].as_u64()
.expect("[IMPORT] 'completed' field doesn't exist or is missing!"),
updated: value["updated"].as_u64()
.expect("[IMPORT] 'updated' field doesn't exist or is missing!"),
active: value["active"].as_u64()
.expect("[IMPORT] 'active' field doesn't exist or is missing!") as u8,
torrents_active: BTreeMap::new()
};
tracker.add_user_update(user_hash, user_entry, UpdatesAction::Add);
}
tracker.save_user_updates(Arc::clone(&tracker)).await
.expect("[IMPORT] Unable to save users to the database!");
}
info!("[IMPORT] Importing of data completed");
exit(0)
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_cert_gen.rs | src/tracker/impls/torrent_tracker_cert_gen.rs | use std::fs;
use std::process::exit;
use log::{error, info};
use rcgen::{generate_simple_self_signed, CertifiedKey};
use crate::structs::Cli;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub async fn cert_gen(&self, args: &Cli)
{
info!("[CERTGEN] Requesting to generate a self-signed key and certificate file");
let mut subject_alt_names = vec![
String::from("localhost")
];
if args.selfsigned_domain != "localhost" {
subject_alt_names.push(args.selfsigned_domain.clone());
}
let CertifiedKey { cert, signing_key } = generate_simple_self_signed(subject_alt_names)
.expect("[CERTGEN] Failed to generate self-signed certificate");
let keyfile = &args.selfsigned_keyfile;
let certfile = &args.selfsigned_certfile;
if let Err(error) = fs::write(keyfile, signing_key.serialize_pem()) {
error!("[CERTGEN] The key file {keyfile} could not be generated!");
panic!("[CERTGEN] {error}")
}
info!("[CERTGEN] The key file {keyfile} has been generated");
if let Err(error) = fs::write(certfile, cert.pem()) {
error!("[CERTGEN] The cert file {certfile} could not be generated!");
panic!("[CERTGEN] {error}")
}
info!("[CERTGEN] The cert file {certfile} has been generated");
info!("[CERTGEN] The files {keyfile} and {certfile} have been generated, use them only for development reasons");
exit(0)
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_torrents.rs | src/tracker/impls/torrent_tracker_torrents.rs | use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use std::sync::Arc;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_entry::TorrentEntry;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Loads all persisted torrents (and their completed counts) from the
/// database into the in-memory tracker. Load errors are silently ignored
/// here — only a successful load is logged.
#[tracing::instrument(level = "debug")]
pub async fn load_torrents(&self, tracker: Arc<TorrentTracker>)
{
if let Ok((torrents, completes)) = self.sqlx.load_torrents(tracker).await {
info!("Loaded {torrents} torrents with {completes} completes");
}
}
/// Persists the given batch of torrent updates via the database layer.
///
/// Returns `Ok(())` when the batch was written, `Err(())` otherwise; the
/// outcome is logged either way with the batch size.
#[tracing::instrument(level = "debug")]
pub async fn save_torrents(&self, tracker: Arc<TorrentTracker>, torrents: BTreeMap<InfoHash, (TorrentEntry, UpdatesAction)>) -> Result<(), ()>
{
    // Capture the size before the map is moved into the save call.
    let torrents_count = torrents.len();
    if self.sqlx.save_torrents(tracker, torrents).await.is_ok() {
        info!("[SYNC TORRENTS] Synced {torrents_count} torrents");
        Ok(())
    } else {
        error!("[SYNC TORRENTS] Unable to sync {torrents_count} torrents");
        Err(())
    }
}
/// Resets all seed/peer counters in persistent storage.
///
/// Returns `true` on success, `false` on failure; both outcomes are logged.
#[tracing::instrument(level = "debug")]
pub async fn reset_seeds_peers(&self, tracker: Arc<TorrentTracker>) -> bool
{
    let succeeded = self.sqlx.reset_seeds_peers(tracker).await.is_ok();
    if succeeded {
        info!("[RESET SEEDS PEERS] Completed");
    } else {
        error!("[RESET SEEDS PEERS] Unable to reset the seeds and peers");
    }
    succeeded
}
/// Inserts or replaces a torrent entry in its shard.
///
/// Returns the resulting entry plus `true` when the hash was newly inserted,
/// `false` when an existing entry was overwritten. Global stats (Torrents,
/// Completed, Seeds, Peers) are adjusted by the delta between old and new
/// state so the counters stay consistent either way.
#[tracing::instrument(level = "debug")]
pub fn add_torrent(&self, info_hash: InfoHash, torrent_entry: TorrentEntry) -> (TorrentEntry, bool)
{
// Shard selection uses the first byte of the info hash.
let shard = self.torrents_sharding.get_shard(info_hash.0[0]).unwrap();
let mut lock = shard.write();
match lock.entry(info_hash) {
Entry::Vacant(v) => {
// Fresh insert: count the full entry into the global stats.
self.update_stats(StatsEvent::Torrents, 1);
self.update_stats(StatsEvent::Completed, torrent_entry.completed as i64);
self.update_stats(StatsEvent::Seeds, torrent_entry.seeds.len() as i64);
self.update_stats(StatsEvent::Peers, torrent_entry.peers.len() as i64);
let entry_clone = torrent_entry.clone();
v.insert(torrent_entry);
(entry_clone, true)
}
Entry::Occupied(mut o) => {
let current = o.get_mut();
// Overwrite: only the difference between old and new counts is applied.
let completed_delta = torrent_entry.completed as i64 - current.completed as i64;
let seeds_delta = torrent_entry.seeds.len() as i64 - current.seeds.len() as i64;
let peers_delta = torrent_entry.peers.len() as i64 - current.peers.len() as i64;
if completed_delta != 0 {
self.update_stats(StatsEvent::Completed, completed_delta);
}
if seeds_delta != 0 {
self.update_stats(StatsEvent::Seeds, seeds_delta);
}
if peers_delta != 0 {
self.update_stats(StatsEvent::Peers, peers_delta);
}
current.completed = torrent_entry.completed;
current.seeds = torrent_entry.seeds;
current.peers = torrent_entry.peers;
current.updated = torrent_entry.updated;
(current.clone(), false)
}
}
}
/// Batch form of [`add_torrent`]: inserts every entry and returns, per hash,
/// the resulting entry and whether it was newly created.
#[tracing::instrument(level = "debug")]
pub fn add_torrents(&self, hashes: BTreeMap<InfoHash, TorrentEntry>) -> BTreeMap<InfoHash, (TorrentEntry, bool)>
{
    let mut results = BTreeMap::new();
    for (info_hash, torrent_entry) in hashes {
        let outcome = self.add_torrent(info_hash, torrent_entry);
        results.insert(info_hash, outcome);
    }
    results
}
/// Looks up one torrent by info hash, cloning the entry out of its shard.
#[tracing::instrument(level = "debug")]
pub fn get_torrent(&self, info_hash: InfoHash) -> Option<TorrentEntry>
{
    // Only the shard owning this hash's first byte needs a read lock.
    let shard = self.torrents_sharding.get_shard(info_hash.0[0]).unwrap();
    shard.read_recursive().get(&info_hash).map(Clone::clone)
}
/// Batch form of [`get_torrent`]: returns an entry (or `None`) per requested
/// hash, preserving the mapping in a sorted map.
#[tracing::instrument(level = "debug")]
pub fn get_torrents(&self, hashes: Vec<InfoHash>) -> BTreeMap<InfoHash, Option<TorrentEntry>>
{
    let mut found = BTreeMap::new();
    for info_hash in hashes {
        found.insert(info_hash, self.get_torrent(info_hash));
    }
    found
}
/// Removes a torrent from its shard, adjusting the Torrents/Seeds/Peers stats,
/// and returns the removed entry if it existed.
///
/// IMPROVED: the previous version first called `contains_torrent` (a separate
/// shard lookup and lock) before taking the write lock and removing — a
/// redundant double lookup, since `remove` already returns `None` for an
/// absent key. Assumes `contains_torrent` was a pure membership test with no
/// side effects — TODO confirm against its definition.
///
/// NOTE(review): the `Completed` stat is deliberately not decremented here,
/// matching the original behavior (it appears to be a historical counter) —
/// confirm against the stats semantics.
#[tracing::instrument(level = "debug")]
pub fn remove_torrent(&self, info_hash: InfoHash) -> Option<TorrentEntry>
{
    let shard = self.torrents_sharding.get_shard(info_hash.0[0]).unwrap();
    let mut lock = shard.write();
    let data = lock.remove(&info_hash)?;
    self.update_stats(StatsEvent::Torrents, -1);
    self.update_stats(StatsEvent::Seeds, -(data.seeds.len() as i64));
    self.update_stats(StatsEvent::Peers, -(data.peers.len() as i64));
    Some(data)
}
/// Batch form of [`remove_torrent`]: removes every listed hash and reports,
/// per hash, the entry that was removed (or `None` if it was absent).
#[tracing::instrument(level = "debug")]
pub fn remove_torrents(&self, hashes: Vec<InfoHash>) -> BTreeMap<InfoHash, Option<TorrentEntry>>
{
    let mut removed = BTreeMap::new();
    for info_hash in hashes {
        removed.insert(info_hash, self.remove_torrent(info_hash));
    }
    removed
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/info_hash.rs | src/tracker/impls/info_hash.rs | use std::fmt;
use std::fmt::Formatter;
use crate::common::common::bin2hex;
use crate::tracker::structs::info_hash::InfoHash;
impl fmt::Display for InfoHash {
// Renders the 20-byte hash as lowercase hex via the shared `bin2hex` helper.
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
bin2hex(&self.0, f)
}
}
impl std::str::FromStr for InfoHash {
    type Err = binascii::ConvertError;

    /// Parses a 40-character hex string (upper- or lowercase) into an InfoHash.
    ///
    /// Returns `InvalidInputLength` for any other length and `InvalidInput`
    /// when a non-hex character is encountered.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let raw = s.as_bytes();
        if raw.len() != 40 {
            return Err(binascii::ConvertError::InvalidInputLength);
        }
        let mut out = [0u8; 20];
        for (dst, pair) in out.iter_mut().zip(raw.chunks_exact(2)) {
            let hi = hex_to_nibble(pair[0]);
            let lo = hex_to_nibble(pair[1]);
            // hex_to_nibble signals a bad digit with the 0xFF sentinel.
            if hi == 0xFF || lo == 0xFF {
                return Err(binascii::ConvertError::InvalidInput);
            }
            *dst = (hi << 4) | lo;
        }
        Ok(InfoHash(out))
    }
}
impl From<&[u8]> for InfoHash {
    /// Builds an InfoHash from a byte slice.
    ///
    /// # Panics
    /// Panics if `data` is not exactly 20 bytes long.
    fn from(data: &[u8]) -> InfoHash {
        assert_eq!(data.len(), 20);
        let mut buf = [0u8; 20];
        buf.copy_from_slice(data);
        InfoHash(buf)
    }
}
impl From<[u8; 20]> for InfoHash {
// Infallible wrap of an owned 20-byte array.
fn from(data: [u8; 20]) -> Self {
InfoHash(data)
}
}
impl serde::ser::Serialize for InfoHash {
// Serializes as a 40-character lowercase hex string, encoding into a fixed
// stack buffer to avoid a heap allocation per hash.
fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
let mut buffer = [0u8; 40];
for (i, &byte) in self.0.iter().enumerate() {
let idx = i * 2;
buffer[idx] = HEX_CHARS[(byte >> 4) as usize];
buffer[idx + 1] = HEX_CHARS[(byte & 0xf) as usize];
}
// SAFETY: We know the buffer contains only valid ASCII hex characters
let str_out = unsafe { std::str::from_utf8_unchecked(&buffer) };
serializer.serialize_str(str_out)
}
}
impl<'de> serde::de::Deserialize<'de> for InfoHash {
// Deserializes from a 40-character hex string. A visitor is used so both
// `&str` and raw byte-slice inputs (from non-self-describing formats) work.
fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> {
struct InfoHashVisitor;
impl<'de> serde::de::Visitor<'de> for InfoHashVisitor {
type Value = InfoHash;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a 40 character hex string")
}
// String input: decode hex pairs; 0xFF from hex_to_nibble marks a bad digit.
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if v.len() != 40 {
return Err(E::custom("expected 40 character hex string"));
}
let mut result = InfoHash([0u8; 20]);
let bytes = v.as_bytes();
for (i, chunk) in bytes.chunks_exact(2).enumerate() {
let high = hex_to_nibble(chunk[0]);
let low = hex_to_nibble(chunk[1]);
if high == 0xFF || low == 0xFF {
return Err(E::custom("invalid hex character"));
}
result.0[i] = (high << 4) | low;
}
Ok(result)
}
// Byte input: same decoding, for formats that hand over raw ASCII bytes.
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if v.len() != 40 {
return Err(E::custom("expected 40 byte hex string"));
}
let mut result = InfoHash([0u8; 20]);
for (i, chunk) in v.chunks_exact(2).enumerate() {
let high = hex_to_nibble(chunk[0]);
let low = hex_to_nibble(chunk[1]);
if high == 0xFF || low == 0xFF {
return Err(E::custom("invalid hex character"));
}
result.0[i] = (high << 4) | low;
}
Ok(result)
}
}
des.deserialize_str(InfoHashVisitor)
}
}
#[inline(always)]
fn hex_to_nibble(c: u8) -> u8 {
match c {
b'0'..=b'9' => c - b'0',
b'a'..=b'f' => c - b'a' + 10,
b'A'..=b'F' => c - b'A' + 10,
_ => 0xFF,
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_torrents_whitelist_updates.rs | src/tracker/impls/torrent_tracker_torrents_whitelist_updates.rs | use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::time::SystemTime;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Queues a single whitelist change, keyed by a nanosecond timestamp.
/// Returns `true` when a new slot was used (and the stat incremented);
/// `false` if the same timestamp key already existed (extremely unlikely).
#[tracing::instrument(level = "debug")]
pub fn add_whitelist_update(&self, info_hash: InfoHash, updates_action: UpdatesAction) -> bool
{
let mut lock = self.torrents_whitelist_updates.write();
let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
if lock.insert(timestamp, (info_hash, updates_action)).is_none() {
self.update_stats(StatsEvent::WhitelistUpdates, 1);
true
} else {
false
}
}
/// Queues a batch of whitelist changes under a single write lock; each entry
/// is keyed by its own nanosecond timestamp. Returns, per hash, whether a
/// fresh slot was used. The stat counter is bumped once for the whole batch.
#[tracing::instrument(level = "debug")]
pub fn add_whitelist_updates(&self, hashes: Vec<(InfoHash, UpdatesAction)>) -> Vec<(InfoHash, bool)>
{
    let mut lock = self.torrents_whitelist_updates.write();
    let mut results = Vec::with_capacity(hashes.len());
    let mut inserted = 0i64;
    for (info_hash, updates_action) in hashes {
        let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
        let fresh = lock.insert(timestamp, (info_hash, updates_action)).is_none();
        if fresh {
            inserted += 1;
        }
        results.push((info_hash, fresh));
    }
    if inserted > 0 {
        self.update_stats(StatsEvent::WhitelistUpdates, inserted);
    }
    results
}
/// Returns a snapshot (clone) of all pending whitelist updates.
#[tracing::instrument(level = "debug")]
pub fn get_whitelist_updates(&self) -> HashMap<u128, (InfoHash, UpdatesAction)>
{
let lock = self.torrents_whitelist_updates.read_recursive();
lock.clone()
}
/// Drops one pending whitelist update by timestamp key, decrementing the
/// stat on success. Returns whether an entry was actually removed.
#[tracing::instrument(level = "debug")]
pub fn remove_whitelist_update(&self, timestamp: &u128) -> bool
{
let mut lock = self.torrents_whitelist_updates.write();
if lock.remove(timestamp).is_some() {
self.update_stats(StatsEvent::WhitelistUpdates, -1);
true
} else {
false
}
}
/// Drops every pending whitelist update and resets its stat counter to zero.
#[tracing::instrument(level = "debug")]
pub fn clear_whitelist_updates(&self)
{
let mut lock = self.torrents_whitelist_updates.write();
lock.clear();
self.set_stats(StatsEvent::WhitelistUpdates, 0);
}
/// Flushes pending whitelist updates to persistent storage.
///
/// Updates are keyed by a nanosecond timestamp; when several updates exist
/// for the same info hash, only the newest is persisted and the older slots
/// are discarded. On success every flushed slot is removed from the pending
/// map and the `WhitelistUpdates` stat is decremented accordingly.
///
/// BUGFIX: `lock.remove(×tamp)` was HTML-entity mojibake (`&times;tamp`)
/// for `lock.remove(&timestamp)` and did not compile; restored the borrow.
#[tracing::instrument(level = "debug")]
pub async fn save_whitelist_updates(&self, torrent_tracker: Arc<TorrentTracker>) -> Result<(), ()>
{
    // Snapshot under a short-lived read lock so the DB save doesn't block writers.
    let updates = {
        let lock = self.torrents_whitelist_updates.read_recursive();
        lock.clone()
    };
    if updates.is_empty() {
        return Ok(());
    }
    // Deduplicate per info hash, keeping the most recent timestamp only.
    let mut mapping: HashMap<InfoHash, (u128, UpdatesAction)> = HashMap::with_capacity(updates.len());
    let mut timestamps_to_remove = Vec::new();
    for (timestamp, (info_hash, updates_action)) in updates {
        match mapping.entry(info_hash) {
            Entry::Occupied(mut o) => {
                let existing = o.get();
                if timestamp > existing.0 {
                    timestamps_to_remove.push(existing.0);
                    o.insert((timestamp, updates_action));
                } else {
                    timestamps_to_remove.push(timestamp);
                }
            }
            Entry::Vacant(v) => {
                v.insert((timestamp, updates_action));
            }
        }
    }
    let mapping_len = mapping.len();
    let whitelist_updates: Vec<(InfoHash, UpdatesAction)> = mapping
        .iter()
        .map(|(info_hash, (_, updates_action))| (*info_hash, *updates_action))
        .collect();
    match self.save_whitelist(torrent_tracker, whitelist_updates).await {
        Ok(_) => {
            info!("[SYNC WHITELIST UPDATES] Synced {mapping_len} whitelists");
            let mut lock = self.torrents_whitelist_updates.write();
            let mut removed_count = 0i64;
            // Remove the winning (persisted) slots...
            for (_, (timestamp, _)) in mapping {
                if lock.remove(&timestamp).is_some() {
                    removed_count += 1;
                }
            }
            // ...and the superseded/duplicate slots collected above.
            for timestamp in timestamps_to_remove {
                if lock.remove(&timestamp).is_some() {
                    removed_count += 1;
                }
            }
            if removed_count > 0 {
                self.update_stats(StatsEvent::WhitelistUpdates, -removed_count);
            }
            Ok(())
        }
        Err(_) => {
            error!("[SYNC WHITELIST UPDATES] Unable to sync {mapping_len} whitelists");
            Err(())
        }
    }
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/cleanup_stats_atomics.rs | src/tracker/impls/cleanup_stats_atomics.rs | use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use log::info;
use crate::tracker::impls::torrent_sharding::CACHE_LINE_SIZE;
use crate::tracker::structs::cleanup_stats_atomic::CleanupStatsAtomic;
use crate::tracker::structs::padded_atomic_u64::PaddedAtomicU64;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl CleanupStatsAtomic {
/// Creates zeroed cleanup counters, each padded out to a full cache line
/// (CACHE_LINE_SIZE) so concurrent updates to different counters don't
/// false-share.
pub(crate) fn new() -> Self {
Self {
torrents: PaddedAtomicU64 {
value: AtomicU64::new(0),
_padding: [0; CACHE_LINE_SIZE - std::mem::size_of::<AtomicU64>()],
},
seeds: PaddedAtomicU64 {
value: AtomicU64::new(0),
_padding: [0; CACHE_LINE_SIZE - std::mem::size_of::<AtomicU64>()],
},
peers: PaddedAtomicU64 {
value: AtomicU64::new(0),
_padding: [0; CACHE_LINE_SIZE - std::mem::size_of::<AtomicU64>()],
},
}
}
// Adds to the cleaned-torrents counter; Relaxed is fine, the value is advisory.
pub(crate) fn add_torrents(&self, count: u64) {
self.torrents.value.fetch_add(count, Ordering::Relaxed);
}
// Adds to the cleaned-seeds counter; Relaxed is fine, the value is advisory.
pub(crate) fn add_seeds(&self, count: u64) {
self.seeds.value.fetch_add(count, Ordering::Relaxed);
}
// Adds to the cleaned-peers counter; Relaxed is fine, the value is advisory.
pub(crate) fn add_peers(&self, count: u64) {
self.peers.value.fetch_add(count, Ordering::Relaxed);
}
/// Logs the accumulated cleanup totals when anything was cleaned.
/// NOTE: despite the name, the tracker argument is currently unused — the
/// counters are only reported, not applied to tracker state.
pub(crate) fn apply_to_tracker(&self, _tracker: &Arc<TorrentTracker>) {
let torrents = self.torrents.value.load(Ordering::Relaxed);
let seeds = self.seeds.value.load(Ordering::Relaxed);
let peers = self.peers.value.load(Ordering::Relaxed);
if torrents > 0 || seeds > 0 || peers > 0 {
info!("[CLEANUP TOTAL] Torrents: {torrents} - Seeds: {seeds} - Peers: {peers}");
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_handlers.rs | src/tracker/impls/torrent_tracker_handlers.rs | use std::collections::{BTreeMap, HashMap};
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::SystemTime;
use log::debug;
use crate::common::structs::custom_error::CustomError;
use crate::common::structs::number_of_bytes::NumberOfBytes;
use crate::tracker::enums::announce_event::AnnounceEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::announce_query_request::AnnounceQueryRequest;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::peer_id::PeerId;
use crate::tracker::structs::scrape_query_request::ScrapeQueryRequest;
use crate::tracker::structs::torrent_entry::TorrentEntry;
use crate::tracker::structs::torrent_peer::TorrentPeer;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
use crate::tracker::structs::user_id::UserId;
impl TorrentTracker {
/// Validates and parses a raw announce query-string map into a typed request.
///
/// Required fields: `info_hash` (20 bytes), `peer_id` (20 bytes), `port`.
/// `uploaded`/`downloaded`/`left` default to 0, `compact` to false, `event`
/// to Started, and `numwant` is clamped to 72 (0 or >72 becomes 72).
/// Returns a `CustomError` naming the first missing/invalid field.
#[tracing::instrument(level = "debug")]
pub async fn validate_announce(&self, remote_addr: IpAddr, query: HashMap<String, Vec<Vec<u8>>>) -> Result<AnnounceQueryRequest, CustomError>
{
let now = std::time::Instant::now();
// Fetches the first value for `field`, optionally enforcing an exact length.
fn get_required_bytes<'a>(query: &'a HashMap<String, Vec<Vec<u8>>>, field: &str, expected_len: Option<usize>) -> Result<&'a [u8], CustomError> {
let value = query.get(field)
.ok_or_else(|| CustomError::new(&format!("missing {field}")))?
.first()
.ok_or_else(|| CustomError::new(&format!("no {field} given")))?;
if let Some(len) = expected_len {
if value.len() != len {
return Err(CustomError::new(&format!("invalid {field} size")));
}
}
Ok(value.as_slice())
}
// Parses the first value for `field` as UTF-8 then as an integer of type T.
fn parse_integer<T: std::str::FromStr>(query: &HashMap<String, Vec<Vec<u8>>>, field: &str) -> Result<T, CustomError> {
let bytes = get_required_bytes(query, field, None)?;
let str_value = std::str::from_utf8(bytes)
.map_err(|_| CustomError::new(&format!("invalid {field}")))?;
str_value.parse::<T>()
.map_err(|_| CustomError::new(&format!("missing or invalid {field}")))
}
let info_hash_bytes = get_required_bytes(&query, "info_hash", Some(20))?;
let peer_id_bytes = get_required_bytes(&query, "peer_id", Some(20))?;
let port_integer = parse_integer::<u16>(&query, "port")?;
// Parse info_hash with optimized conversion
let info_hash = InfoHash::from(info_hash_bytes);
let peer_id = PeerId::from(peer_id_bytes);
// Parse optional parameters with defaults
let uploaded_integer = parse_integer::<u64>(&query, "uploaded").unwrap_or(0);
let downloaded_integer = parse_integer::<u64>(&query, "downloaded").unwrap_or(0);
let left_integer = parse_integer::<u64>(&query, "left").unwrap_or(0);
let compact_bool = query.get("compact")
.and_then(|v| v.first())
.and_then(|bytes| std::str::from_utf8(bytes).ok())
.and_then(|s| s.parse::<u8>().ok())
.map(|v| v == 1)
.unwrap_or(false);
// Unknown event strings deliberately fall back to Started.
let event_integer = query.get("event")
.and_then(|v| v.first())
.and_then(|bytes| std::str::from_utf8(bytes).ok())
.map(|s| match s.to_lowercase().as_str() {
"stopped" => AnnounceEvent::Stopped,
"completed" => AnnounceEvent::Completed,
_ => AnnounceEvent::Started,
})
.unwrap_or(AnnounceEvent::Started);
// Presence of the key alone flags no_peer_id; its value is ignored.
let no_peer_id_bool = query.contains_key("no_peer_id");
let numwant_integer = query.get("numwant")
.and_then(|v| v.first())
.and_then(|bytes| std::str::from_utf8(bytes).ok())
.and_then(|s| s.parse::<u64>().ok())
.map(|v| if v == 0 || v > 72 { 72 } else { v })
.unwrap_or(72);
let elapsed = now.elapsed();
debug!("[PERF] Announce validation took: {:?}", elapsed);
Ok(AnnounceQueryRequest {
info_hash,
peer_id,
port: port_integer,
uploaded: uploaded_integer,
downloaded: downloaded_integer,
left: left_integer,
compact: compact_bool,
no_peer_id: no_peer_id_bool,
event: event_integer,
remote_addr,
numwant: numwant_integer,
})
}
/// Applies a validated announce to tracker state and returns the stored peer
/// plus the torrent's resulting entry.
///
/// Behavior by event:
/// - Started/None: upsert the peer; update the user's active-torrents set.
/// - Stopped: remove the peer; credit the user's up/down totals.
/// - Completed: upsert the peer as a completion; bump the user's counter.
/// When the database is persistent, every path also queues a torrent update;
/// user bookkeeping only runs when `users_enabled` and a user key is present.
#[tracing::instrument(level = "debug")]
pub async fn handle_announce(&self, data: Arc<TorrentTracker>, announce_query: AnnounceQueryRequest, user_key: Option<UserId>) -> Result<(TorrentPeer, TorrentEntry), CustomError>
{
let now = std::time::Instant::now();
let mut torrent_peer = TorrentPeer {
peer_id: announce_query.peer_id,
peer_addr: SocketAddr::new(announce_query.remote_addr, announce_query.port),
updated: std::time::Instant::now(),
uploaded: NumberOfBytes(announce_query.uploaded as i64),
downloaded: NumberOfBytes(announce_query.downloaded as i64),
left: NumberOfBytes(announce_query.left as i64),
event: AnnounceEvent::None,
};
let is_persistent = data.config.database.persistent;
let users_enabled = data.config.tracker_config.users_enabled;
match announce_query.event {
// A missing/unknown event is treated like Started.
AnnounceEvent::Started | AnnounceEvent::None => {
torrent_peer.event = AnnounceEvent::Started;
debug!("[HANDLE ANNOUNCE] Adding to infohash {} peerid {}", announce_query.info_hash, announce_query.peer_id);
let torrent_entry = data.add_torrent_peer(
announce_query.info_hash,
announce_query.peer_id,
torrent_peer.clone(),
false
);
if is_persistent {
let _ = data.add_torrent_update(
announce_query.info_hash,
torrent_entry.1.clone(),
UpdatesAction::Add
);
}
if users_enabled {
if let Some(user_id) = user_key {
if let Some(mut user) = data.get_user(user_id) {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
user.updated = now;
// Track this torrent as active for the user, stamped with "now".
user.torrents_active.insert(announce_query.info_hash, now);
data.add_user(user_id, user.clone());
if is_persistent {
data.add_user_update(user_id, user, UpdatesAction::Add);
}
}
}
}
let elapsed = now.elapsed();
debug!("[PERF] Announce Started handling took: {elapsed:?}");
Ok((torrent_peer, TorrentEntry {
seeds: torrent_entry.1.seeds,
peers: torrent_entry.1.peers,
completed: torrent_entry.1.completed,
updated: torrent_entry.1.updated
}))
}
AnnounceEvent::Stopped => {
torrent_peer.event = AnnounceEvent::Stopped;
debug!("[HANDLE ANNOUNCE] Removing from infohash {} peerid {}", announce_query.info_hash, announce_query.peer_id);
// Only when both the peer and the post-removal torrent exist do we
// credit the user's totals; otherwise fall back to an empty entry.
let torrent_entry = match data.remove_torrent_peer(
announce_query.info_hash,
announce_query.peer_id,
is_persistent,
false
) {
(Some(_), Some(new_torrent)) => {
if users_enabled {
if let Some(user_id) = user_key {
if let Some(mut user) = data.get_user(user_id) {
user.uploaded += announce_query.uploaded;
user.downloaded += announce_query.downloaded;
user.updated = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
user.torrents_active.remove(&announce_query.info_hash);
data.add_user(user_id, user.clone());
if is_persistent {
data.add_user_update(user_id, user, UpdatesAction::Add);
}
}
}
}
new_torrent
}
_ => TorrentEntry::new()
};
if is_persistent {
let _ = data.add_torrent_update(
announce_query.info_hash,
torrent_entry.clone(),
UpdatesAction::Add
);
}
let elapsed = now.elapsed();
debug!("[PERF] Announce Stopped handling took: {elapsed:?}");
Ok((torrent_peer, torrent_entry))
}
AnnounceEvent::Completed => {
torrent_peer.event = AnnounceEvent::Completed;
debug!("[HANDLE ANNOUNCE] Adding to infohash {} peerid {}", announce_query.info_hash, announce_query.peer_id);
// The `true` flag marks this upsert as a completion event.
let torrent_entry = data.add_torrent_peer(
announce_query.info_hash,
announce_query.peer_id,
torrent_peer.clone(),
true
);
if is_persistent {
let _ = data.add_torrent_update(
announce_query.info_hash,
torrent_entry.1.clone(),
UpdatesAction::Add
);
}
if users_enabled {
if let Some(user_id) = user_key {
if let Some(mut user) = data.get_user(user_id) {
user.completed += 1;
user.updated = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
data.add_user(user_id, user.clone());
if is_persistent {
data.add_user_update(user_id, user, UpdatesAction::Add);
}
}
}
}
let elapsed = now.elapsed();
debug!("[PERF] Announce Completed handling took: {elapsed:?}");
Ok((torrent_peer, torrent_entry.1))
}
}
}
/// Validates a raw scrape query-string map: requires at least one
/// `info_hash` value, each exactly 20 bytes. Returns the typed request or a
/// `CustomError` describing the first problem found.
#[tracing::instrument(level = "debug")]
pub async fn validate_scrape(&self, query: HashMap<String, Vec<Vec<u8>>>) -> Result<ScrapeQueryRequest, CustomError>
{
    let now = std::time::Instant::now();
    let hashes = query
        .get("info_hash")
        .ok_or_else(|| CustomError::new("missing info_hash"))?;
    if hashes.is_empty() {
        return Err(CustomError::new("no info_hash given"));
    }
    let mut info_hash_vec = Vec::with_capacity(hashes.len());
    for raw in hashes {
        if raw.len() != 20 {
            return Err(CustomError::new("an invalid info_hash was given"));
        }
        info_hash_vec.push(InfoHash::from(raw.as_slice()));
    }
    let elapsed = now.elapsed();
    debug!("[PERF] Scrape validation took: {elapsed:?}");
    Ok(ScrapeQueryRequest { info_hash: info_hash_vec })
}
/// Resolves every requested info hash to its current torrent entry, using a
/// default (empty) entry for unknown hashes.
#[tracing::instrument(level = "debug")]
pub async fn handle_scrape(&self, data: Arc<TorrentTracker>, scrape_query: ScrapeQueryRequest) -> BTreeMap<InfoHash, TorrentEntry>
{
    let now = std::time::Instant::now();
    let mut result = BTreeMap::new();
    for &info_hash in &scrape_query.info_hash {
        result.insert(info_hash, data.get_torrent(info_hash).unwrap_or_default());
    }
    let elapsed = now.elapsed();
    debug!("[PERF] Scrape handling took: {:?}", elapsed);
    result
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_torrents_whitelist.rs | src/tracker/impls/torrent_tracker_torrents_whitelist.rs | use std::sync::Arc;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Loads the persisted whitelist into memory; load errors are ignored here.
#[tracing::instrument(level = "debug")]
pub async fn load_whitelist(&self, tracker: Arc<TorrentTracker>)
{
if let Ok(whitelist) = self.sqlx.load_whitelist(tracker).await {
info!("Loaded {whitelist} whitelists");
}
}
/// Persists a batch of whitelist changes; logs and maps the DB outcome to
/// `Ok(())`/`Err(())`.
#[tracing::instrument(level = "debug")]
pub async fn save_whitelist(&self, tracker: Arc<TorrentTracker>, hashes: Vec<(InfoHash, UpdatesAction)>) -> Result<(), ()>
{
let hashes_len = hashes.len();
match self.sqlx.save_whitelist(tracker, hashes).await {
Ok(_) => {
info!("[SYNC WHITELIST] Synced {hashes_len} whitelists");
Ok(())
}
Err(_) => {
error!("[SYNC WHITELIST] Unable to sync {hashes_len} whitelists");
Err(())
}
}
}
/// Adds an info hash to the whitelist if not already present; bumps the
/// `Whitelist` stat and returns `true` only on a fresh insert.
#[tracing::instrument(level = "debug")]
pub fn add_whitelist(&self, info_hash: InfoHash) -> bool
{
    let mut lock = self.torrents_whitelist.write();
    if lock.contains(&info_hash) {
        return false;
    }
    lock.push(info_hash);
    self.update_stats(StatsEvent::Whitelist, 1);
    true
}
/// Returns a snapshot (clone) of the whitelist.
#[tracing::instrument(level = "debug")]
pub fn get_whitelist(&self) -> Vec<InfoHash>
{
let lock = self.torrents_whitelist.read_recursive();
lock.clone()
}
/// Membership test: is this info hash whitelisted? (linear scan of the Vec)
#[tracing::instrument(level = "debug")]
pub fn check_whitelist(&self, info_hash: InfoHash) -> bool
{
let lock = self.torrents_whitelist.read_recursive();
lock.contains(&info_hash)
}
/// Removes an info hash from the whitelist (order is not preserved —
/// `swap_remove`), decrementing the `Whitelist` stat. Returns whether the
/// hash was present.
#[tracing::instrument(level = "debug")]
pub fn remove_whitelist(&self, info_hash: InfoHash) -> bool
{
    let mut lock = self.torrents_whitelist.write();
    match lock.iter().position(|entry| *entry == info_hash) {
        Some(index) => {
            lock.swap_remove(index);
            self.update_stats(StatsEvent::Whitelist, -1);
            true
        }
        None => false,
    }
}
#[tracing::instrument(level = "debug")]
pub fn clear_whitelist(&self)
{
let mut lock = self.torrents_whitelist.write();
lock.clear();
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_torrents_blacklist.rs | src/tracker/impls/torrent_tracker_torrents_blacklist.rs | use std::sync::Arc;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Loads the persisted blacklist into memory; load errors are ignored here.
#[tracing::instrument(level = "debug")]
pub async fn load_blacklist(&self, tracker: Arc<TorrentTracker>)
{
if let Ok(blacklist) = self.sqlx.load_blacklist(tracker).await {
info!("Loaded {blacklist} blacklists");
}
}
/// Persists a batch of blacklist changes; logs and maps the DB outcome to
/// `Ok(())`/`Err(())`.
#[tracing::instrument(level = "debug")]
pub async fn save_blacklist(&self, tracker: Arc<TorrentTracker>, hashes: Vec<(InfoHash, UpdatesAction)>) -> Result<(), ()>
{
let hashes_len = hashes.len();
match self.sqlx.save_blacklist(tracker, hashes).await {
Ok(_) => {
info!("[SYNC BLACKLIST] Synced {hashes_len} blacklists");
Ok(())
}
Err(_) => {
error!("[SYNC BLACKLIST] Unable to sync {hashes_len} blacklists");
Err(())
}
}
}
/// Adds an info hash to the blacklist if not already present; bumps the
/// `Blacklist` stat and returns `true` only on a fresh insert.
#[tracing::instrument(level = "debug")]
pub fn add_blacklist(&self, info_hash: InfoHash) -> bool
{
    let mut lock = self.torrents_blacklist.write();
    if lock.contains(&info_hash) {
        return false;
    }
    lock.push(info_hash);
    self.update_stats(StatsEvent::Blacklist, 1);
    true
}
/// Returns a snapshot (clone) of the blacklist.
#[tracing::instrument(level = "debug")]
pub fn get_blacklist(&self) -> Vec<InfoHash>
{
let lock = self.torrents_blacklist.read_recursive();
lock.clone()
}
/// Membership test: is this info hash blacklisted? (linear scan of the Vec)
#[tracing::instrument(level = "debug")]
pub fn check_blacklist(&self, info_hash: InfoHash) -> bool
{
let lock = self.torrents_blacklist.read_recursive();
lock.contains(&info_hash)
}
/// Removes an info hash from the blacklist (order is not preserved —
/// `swap_remove`), decrementing the `Blacklist` stat. Returns whether the
/// hash was present.
#[tracing::instrument(level = "debug")]
pub fn remove_blacklist(&self, info_hash: InfoHash) -> bool
{
    let mut lock = self.torrents_blacklist.write();
    match lock.iter().position(|entry| *entry == info_hash) {
        Some(index) => {
            lock.swap_remove(index);
            self.update_stats(StatsEvent::Blacklist, -1);
            true
        }
        None => false,
    }
}
#[tracing::instrument(level = "debug")]
pub fn clear_blacklist(&self)
{
let mut lock = self.torrents_blacklist.write();
lock.clear();
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_torrents_blacklist_updates.rs | src/tracker/impls/torrent_tracker_torrents_blacklist_updates.rs | use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::time::SystemTime;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Queues a single blacklist change, keyed by a nanosecond timestamp.
/// Returns `true` when a new slot was used (and the stat incremented);
/// `false` if the same timestamp key already existed (extremely unlikely).
#[tracing::instrument(level = "debug")]
pub fn add_blacklist_update(&self, info_hash: InfoHash, updates_action: UpdatesAction) -> bool
{
let mut lock = self.torrents_blacklist_updates.write();
let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
if lock.insert(timestamp, (info_hash, updates_action)).is_none() {
self.update_stats(StatsEvent::BlacklistUpdates, 1);
true
} else {
false
}
}
/// Queues a batch of blacklist changes under a single write lock; each entry
/// is keyed by its own nanosecond timestamp. Returns, per hash, whether a
/// fresh slot was used. The stat counter is bumped once for the whole batch.
#[tracing::instrument(level = "debug")]
pub fn add_blacklist_updates(&self, hashes: Vec<(InfoHash, UpdatesAction)>) -> Vec<(InfoHash, bool)>
{
    let mut lock = self.torrents_blacklist_updates.write();
    let mut results = Vec::with_capacity(hashes.len());
    let mut inserted = 0i64;
    for (info_hash, updates_action) in hashes {
        let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
        let fresh = lock.insert(timestamp, (info_hash, updates_action)).is_none();
        if fresh {
            inserted += 1;
        }
        results.push((info_hash, fresh));
    }
    if inserted > 0 {
        self.update_stats(StatsEvent::BlacklistUpdates, inserted);
    }
    results
}
/// Returns a snapshot (clone) of all pending blacklist updates.
#[tracing::instrument(level = "debug")]
pub fn get_blacklist_updates(&self) -> HashMap<u128, (InfoHash, UpdatesAction)>
{
let lock = self.torrents_blacklist_updates.read_recursive();
lock.clone()
}
/// Drops one pending blacklist update by timestamp key, decrementing the
/// stat on success. Returns whether an entry was actually removed.
#[tracing::instrument(level = "debug")]
pub fn remove_blacklist_update(&self, timestamp: &u128) -> bool
{
let mut lock = self.torrents_blacklist_updates.write();
if lock.remove(timestamp).is_some() {
self.update_stats(StatsEvent::BlacklistUpdates, -1);
true
} else {
false
}
}
/// Drops every pending blacklist update and resets its stat counter to zero.
#[tracing::instrument(level = "debug")]
pub fn clear_blacklist_updates(&self)
{
let mut lock = self.torrents_blacklist_updates.write();
lock.clear();
self.set_stats(StatsEvent::BlacklistUpdates, 0);
}
/// Flushes pending blacklist updates to persistent storage.
///
/// Updates are keyed by a nanosecond timestamp; when several updates exist
/// for the same info hash, only the newest is persisted and the older slots
/// are discarded. On success every flushed slot is removed from the pending
/// map and the `BlacklistUpdates` stat is decremented accordingly.
///
/// BUGFIX: `lock.remove(×tamp)` was HTML-entity mojibake (`&times;tamp`)
/// for `lock.remove(&timestamp)` and did not compile; restored the borrow.
#[tracing::instrument(level = "debug")]
pub async fn save_blacklist_updates(&self, torrent_tracker: Arc<TorrentTracker>) -> Result<(), ()>
{
    // Snapshot under a short-lived read lock so the DB save doesn't block writers.
    let updates = {
        let lock = self.torrents_blacklist_updates.read_recursive();
        lock.clone()
    };
    if updates.is_empty() {
        return Ok(());
    }
    // Deduplicate per info hash, keeping the most recent timestamp only.
    let mut mapping: HashMap<InfoHash, (u128, UpdatesAction)> = HashMap::with_capacity(updates.len());
    let mut timestamps_to_remove = Vec::new();
    for (timestamp, (info_hash, updates_action)) in updates {
        match mapping.entry(info_hash) {
            Entry::Occupied(mut o) => {
                let existing = o.get();
                if timestamp > existing.0 {
                    timestamps_to_remove.push(existing.0);
                    o.insert((timestamp, updates_action));
                } else {
                    timestamps_to_remove.push(timestamp);
                }
            }
            Entry::Vacant(v) => {
                v.insert((timestamp, updates_action));
            }
        }
    }
    let mapping_len = mapping.len();
    let blacklist_updates: Vec<(InfoHash, UpdatesAction)> = mapping
        .iter()
        .map(|(info_hash, (_, updates_action))| (*info_hash, *updates_action))
        .collect();
    match self.save_blacklist(torrent_tracker, blacklist_updates).await {
        Ok(_) => {
            info!("[SYNC BLACKLIST UPDATES] Synced {mapping_len} blacklists");
            let mut lock = self.torrents_blacklist_updates.write();
            let mut removed_count = 0i64;
            // Remove the winning (persisted) slots...
            for (_, (timestamp, _)) in mapping {
                if lock.remove(&timestamp).is_some() {
                    removed_count += 1;
                }
            }
            // ...and the superseded/duplicate slots collected above.
            for timestamp in timestamps_to_remove {
                if lock.remove(&timestamp).is_some() {
                    removed_count += 1;
                }
            }
            if removed_count > 0 {
                self.update_stats(StatsEvent::BlacklistUpdates, -removed_count);
            }
            Ok(())
        }
        Err(_) => {
            error!("[SYNC BLACKLIST UPDATES] Unable to sync {mapping_len} blacklists");
            Err(())
        }
    }
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_export.rs | src/tracker/impls/torrent_tracker_export.rs | use std::fs;
use std::process::exit;
use std::sync::Arc;
use log::{error, info};
use crate::structs::Cli;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub async fn export(&self, args: &Cli, tracker: Arc<TorrentTracker>)
{
info!("[EXPORT] Requesting to export data");
let config = &tracker.config.tracker_config;
let torrents_file = &args.export_file_torrents;
info!("[EXPORT] Exporting torrents to file {torrents_file}");
let torrents_data = serde_json::to_vec(&tracker.torrents_sharding.get_all_content())
.expect("[EXPORT] Failed to serialize torrents");
if let Err(error) = fs::write(torrents_file, torrents_data) {
error!("[EXPORT] The torrents file {torrents_file} could not be generated!");
panic!("[EXPORT] {error}")
}
info!("[EXPORT] The torrents have been exported");
if config.whitelist_enabled {
let whitelists_file = &args.export_file_whitelists;
info!("[EXPORT] Exporting whitelists to file {whitelists_file}");
let whitelists_data = serde_json::to_vec(&tracker.get_whitelist())
.expect("[EXPORT] Failed to serialize whitelists");
if let Err(error) = fs::write(whitelists_file, whitelists_data) {
error!("[EXPORT] The whitelists file {whitelists_file} could not be generated!");
panic!("[EXPORT] {error}")
}
info!("[EXPORT] The whitelists have been exported");
}
if config.blacklist_enabled {
let blacklists_file = &args.export_file_blacklists;
info!("[EXPORT] Exporting blacklists to file {blacklists_file}");
let blacklists_data = serde_json::to_vec(&tracker.get_blacklist())
.expect("[EXPORT] Failed to serialize blacklists");
if let Err(error) = fs::write(blacklists_file, blacklists_data) {
error!("[EXPORT] The blacklists file {blacklists_file} could not be generated!");
panic!("[EXPORT] {error}")
}
info!("[EXPORT] The blacklists have been exported");
}
if config.keys_enabled {
let keys_file = &args.export_file_keys;
info!("[EXPORT] Exporting keys to file {keys_file}");
let keys_data = serde_json::to_vec(&tracker.get_keys())
.expect("[EXPORT] Failed to serialize keys");
if let Err(error) = fs::write(keys_file, keys_data) {
error!("[EXPORT] The keys file {keys_file} could not be generated!");
panic!("[EXPORT] {error}")
}
info!("[EXPORT] The keys have been exported");
}
if config.users_enabled {
let users_file = &args.export_file_users;
info!("[EXPORT] Exporting users to file {users_file}");
let users_data = serde_json::to_vec(&tracker.get_users())
.expect("[EXPORT] Failed to serialize users");
if let Err(error) = fs::write(users_file, users_data) {
error!("[EXPORT] The users file {users_file} could not be generated!");
panic!("[EXPORT] {error}")
}
info!("[EXPORT] The users have been exported");
}
info!("[EXPORT] Exporting of data completed");
exit(0)
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_sharding.rs | src/tracker/impls/torrent_sharding.rs | use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use log::info;
use parking_lot::RwLock;
use tokio::runtime::Builder;
use tokio::sync::Semaphore;
use tokio::task::JoinHandle;
use tokio_shutdown::Shutdown;
use crate::common::common::shutdown_waiting;
use crate::tracker::structs::cleanup_stats_atomic::CleanupStatsAtomic;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::peer_id::PeerId;
use crate::tracker::structs::torrent_entry::TorrentEntry;
use crate::tracker::structs::torrent_sharding::TorrentSharding;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
pub const CACHE_LINE_SIZE: usize = 64;
impl Default for TorrentSharding {
    // Delegates to `TorrentSharding::new()`, which builds the 256 empty,
    // independently-locked shards.
    fn default() -> Self {
        Self::new()
    }
}
#[allow(dead_code)]
impl TorrentSharding {
#[tracing::instrument(level = "debug")]
pub fn new() -> TorrentSharding {
TorrentSharding {
shards: std::array::from_fn(|_| Arc::new(RwLock::new(BTreeMap::new()))),
}
}
/// Run the periodic peer-cleanup loop on a dedicated multi-threaded runtime
/// until `shutdown` fires.
///
/// Every `peers_cleanup_interval` seconds the 256 shards are partitioned into
/// disjoint batches that are cleaned concurrently (bounded by a semaphore);
/// removal totals are accumulated atomically and applied to the tracker
/// stats once per sweep.
pub async fn cleanup_threads(&self, torrent_tracker: Arc<TorrentTracker>, shutdown: Shutdown, peer_timeout: Duration, persistent: bool) {
    let cleanup_interval = torrent_tracker.config.tracker_config.peers_cleanup_interval;
    let cleanup_threads = torrent_tracker.config.tracker_config.peers_cleanup_threads;
    let cleanup_pool = Builder::new_multi_thread()
        .worker_threads(cleanup_threads as usize)
        .thread_name("cleanup-worker")
        .enable_all()
        .build()
        .unwrap();
    let max_concurrent = std::cmp::max(cleanup_threads as usize * 2, 8);
    let semaphore = Arc::new(Semaphore::new(max_concurrent));
    let cleanup_handles_capacity = 256;
    let timer_handle: JoinHandle<()> = cleanup_pool.spawn({
        let torrent_tracker_clone = Arc::clone(&torrent_tracker);
        let shutdown_clone = shutdown.clone();
        let sem_clone = Arc::clone(&semaphore);
        async move {
            // Minimum step of 1: `Iterator::step_by` panics on a zero step,
            // which `256 / max_concurrent` yields when max_concurrent > 256.
            let batch_size = std::cmp::max(1, 256 / max_concurrent);
            loop {
                if shutdown_waiting(
                    Duration::from_secs(cleanup_interval),
                    shutdown_clone.clone()
                ).await {
                    break;
                }
                let stats = Arc::new(CleanupStatsAtomic::new());
                let mut cleanup_handles = Vec::with_capacity(cleanup_handles_capacity);
                let cutoff = Instant::now() - peer_timeout;
                // Process shards in disjoint half-open batches for cache
                // locality. Batch arithmetic is done in usize: the previous
                // u8 `batch_start + batch_size` overflowed near shard 255
                // (debug-build panic; in release the wrap made the final
                // shards silently skip cleanup), and the inclusive ranges
                // overlapped at every batch boundary.
                for batch_start in (0usize..256).step_by(batch_size) {
                    let batch_end = std::cmp::min(batch_start + batch_size, 256); // exclusive
                    let tracker_clone = Arc::clone(&torrent_tracker_clone);
                    let sem_clone = Arc::clone(&sem_clone);
                    let stats_clone = Arc::clone(&stats);
                    let handle = tokio::spawn(async move {
                        // Bound concurrent batch work; bail out when the
                        // semaphore has been closed.
                        let _permit = sem_clone.acquire().await.ok()?;
                        // Process this batch of shards sequentially.
                        for shard in batch_start..batch_end {
                            Self::cleanup_shard_optimized(
                                Arc::clone(&tracker_clone),
                                shard as u8,
                                cutoff,
                                persistent,
                                Arc::clone(&stats_clone)
                            ).await;
                        }
                        Some(())
                    });
                    cleanup_handles.push(handle);
                }
                // Wait for all cleanups to complete
                futures::future::join_all(cleanup_handles).await;
                // Apply batch stats update
                stats.apply_to_tracker(&torrent_tracker_clone);
            }
        }
    });
    // Wait for shutdown signal
    shutdown.handle().await;
    // Cancel the cleanup task
    timer_handle.abort();
    let _ = timer_handle.await;
    // Shutdown the runtime properly
    cleanup_pool.shutdown_background();
}
/// Remove from one shard every peer whose `updated` timestamp predates
/// `cutoff`, folding removal counts into the shared atomic `stats`.
///
/// Two-phase design: a read-lock pass classifies entries (fully stale
/// torrents vs. torrents with only some stale peers), then a single
/// write-lock pass applies the removals. With `persistent` set, empty
/// torrents are kept and only their peer maps are cleared.
async fn cleanup_shard_optimized(
    torrent_tracker: Arc<TorrentTracker>,
    shard: u8,
    cutoff: Instant,
    persistent: bool,
    stats: Arc<CleanupStatsAtomic>
) {
    let (mut torrents_removed, mut seeds_removed, mut peers_removed) = (0u64, 0u64, 0u64);
    if let Some(shard_arc) = torrent_tracker.torrents_sharding.shards.get(shard as usize) {
        // Pre-sized Vecs keep reallocation rare for typical shard sizes.
        let mut expired_full: Vec<InfoHash> = Vec::with_capacity(32);
        let mut expired_partial: Vec<(InfoHash, Vec<PeerId>, Vec<PeerId>)> = Vec::with_capacity(64);
        // Quick read pass to identify expired entries
        {
            let shard_read = shard_arc.read();
            // Early exit if shard is empty
            if shard_read.is_empty() {
                return;
            }
            for (info_hash, torrent_entry) in shard_read.iter() {
                // Fast path: torrent not updated within timeout => all peers are expired
                if torrent_entry.updated < cutoff {
                    expired_full.push(*info_hash);
                    continue;
                }
                // Optimized: only allocate if we find expired peers
                let mut expired_seeds = Vec::new();
                let mut expired_peers = Vec::new();
                let mut has_expired = false;
                // Seeds: iterator pipeline for large maps, plain loop for small ones.
                if torrent_entry.seeds.len() > 100 {
                    expired_seeds = torrent_entry.seeds.iter()
                        .filter(|(_, peer)| peer.updated < cutoff)
                        .map(|(id, _)| *id)
                        .collect();
                    has_expired = !expired_seeds.is_empty();
                } else {
                    // For small collections, use simpler iteration
                    for (peer_id, torrent_peer) in &torrent_entry.seeds {
                        if torrent_peer.updated < cutoff {
                            expired_seeds.push(*peer_id);
                            has_expired = true;
                        }
                    }
                }
                // Same optimization for peers
                if torrent_entry.peers.len() > 100 {
                    expired_peers = torrent_entry.peers.iter()
                        .filter(|(_, peer)| peer.updated < cutoff)
                        .map(|(id, _)| *id)
                        .collect();
                    has_expired = has_expired || !expired_peers.is_empty();
                } else {
                    for (peer_id, torrent_peer) in &torrent_entry.peers {
                        if torrent_peer.updated < cutoff {
                            expired_peers.push(*peer_id);
                            has_expired = true;
                        }
                    }
                }
                if has_expired {
                    expired_partial.push((*info_hash, expired_seeds, expired_peers));
                }
            }
        }
        // Process removals if needed
        if !expired_partial.is_empty() || !expired_full.is_empty() {
            let mut shard_write = shard_arc.write();
            // Process partial expirations
            for (info_hash, expired_seeds, expired_peers) in expired_partial {
                if let Entry::Occupied(mut entry) = shard_write.entry(info_hash) {
                    let torrent_entry = entry.get_mut();
                    // Batch remove seeds - use retain for better performance on large collections
                    if expired_seeds.len() > 10 {
                        let expired_set: std::collections::HashSet<_> = expired_seeds.into_iter().collect();
                        let before_len = torrent_entry.seeds.len();
                        torrent_entry.seeds.retain(|k, _| !expired_set.contains(k));
                        seeds_removed += (before_len - torrent_entry.seeds.len()) as u64;
                    } else {
                        for peer_id in expired_seeds {
                            if torrent_entry.seeds.remove(&peer_id).is_some() {
                                seeds_removed += 1;
                            }
                        }
                    }
                    // Batch remove peers - use retain for better performance on large collections
                    if expired_peers.len() > 10 {
                        let expired_set: std::collections::HashSet<_> = expired_peers.into_iter().collect();
                        let before_len = torrent_entry.peers.len();
                        torrent_entry.peers.retain(|k, _| !expired_set.contains(k));
                        peers_removed += (before_len - torrent_entry.peers.len()) as u64;
                    } else {
                        for peer_id in expired_peers {
                            if torrent_entry.peers.remove(&peer_id).is_some() {
                                peers_removed += 1;
                            }
                        }
                    }
                    // Remove empty torrent if allowed
                    if !persistent && torrent_entry.seeds.is_empty() && torrent_entry.peers.is_empty() {
                        entry.remove();
                        torrents_removed += 1;
                    }
                }
            }
            // Process full expirations (entire torrent stale)
            if !expired_full.is_empty() {
                if persistent {
                    // When persistent, just clear the peers
                    for info_hash in expired_full {
                        if let Some(torrent_entry) = shard_write.get_mut(&info_hash) {
                            // Safety re-check: the entry may have been refreshed
                            // between the read pass and taking the write lock.
                            if torrent_entry.updated >= cutoff { continue; }
                            seeds_removed += torrent_entry.seeds.len() as u64;
                            peers_removed += torrent_entry.peers.len() as u64;
                            torrent_entry.seeds.clear();
                            torrent_entry.peers.clear();
                        }
                    }
                } else {
                    // Batch remove all expired torrents at once
                    for info_hash in expired_full {
                        if let Entry::Occupied(entry) = shard_write.entry(info_hash) {
                            // Safety re-check: skip entries refreshed since the read pass.
                            if entry.get().updated >= cutoff { continue; }
                            let torrent_entry = entry.get();
                            seeds_removed += torrent_entry.seeds.len() as u64;
                            peers_removed += torrent_entry.peers.len() as u64;
                            entry.remove();
                            torrents_removed += 1;
                        }
                    }
                }
            }
        }
    }
    // Update shared stats atomically
    if torrents_removed > 0 {
        stats.add_torrents(torrents_removed);
    }
    if seeds_removed > 0 {
        stats.add_seeds(seeds_removed);
    }
    if peers_removed > 0 {
        stats.add_peers(peers_removed);
    }
    if seeds_removed > 0 || peers_removed > 0 || torrents_removed > 0 {
        info!("[PEERS] Shard: {shard} - Torrents: {torrents_removed} - Seeds: {seeds_removed} - Peers: {peers_removed}");
    }
}
/// Returns true when the torrent exists in its shard.
///
/// The first info-hash byte selects the shard; a `u8` index is always below
/// 256, the shard-array length, so safe indexing can never panic and the
/// bounds check is trivially eliminable — `unsafe get_unchecked` was
/// unnecessary and has been removed.
#[tracing::instrument(level = "debug")]
#[inline(always)]
pub fn contains_torrent(&self, info_hash: InfoHash) -> bool {
    let shard_index = info_hash.0[0] as usize;
    self.shards[shard_index]
        .read()
        .contains_key(&info_hash)
}
/// Returns true when `peer_id` is registered (as seed or leech) under the
/// given torrent.
///
/// Indexing by `info_hash.0[0] as usize` is always in bounds for the 256-entry
/// shard array, so safe indexing cannot panic — the previous `unsafe
/// get_unchecked` bought nothing and has been removed.
#[tracing::instrument(level = "debug")]
#[inline(always)]
pub fn contains_peer(&self, info_hash: InfoHash, peer_id: PeerId) -> bool {
    let shard_index = info_hash.0[0] as usize;
    let shard = self.shards[shard_index].read();
    shard.get(&info_hash)
        .map(|entry| entry.seeds.contains_key(&peer_id) || entry.peers.contains_key(&peer_id))
        .unwrap_or(false)
}
/// Returns a cloned `Arc` handle to the requested shard.
/// Always `Some` for any `u8` since exactly 256 shards exist.
#[tracing::instrument(level = "debug")]
#[inline(always)]
pub fn get_shard(&self, shard: u8) -> Option<Arc<RwLock<BTreeMap<InfoHash, TorrentEntry>>>> {
    self.shards.get(shard as usize).map(Arc::clone)
}
/// Returns a full clone of one shard's map (empty map for an out-of-range
/// shard, which cannot occur for a `u8`).
#[tracing::instrument(level = "debug")]
pub fn get_shard_content(&self, shard: u8) -> BTreeMap<InfoHash, TorrentEntry> {
    match self.shards.get(shard as usize) {
        Some(guarded) => guarded.read().clone(),
        None => BTreeMap::new(),
    }
}
/// Returns a merged clone of every shard's contents.
///
/// The previous pre-sizing pass (summing shard lengths, then re-assigning an
/// identical empty map) was dead code — `BTreeMap` has no capacity to
/// reserve — and cost an extra read-lock sweep over all 256 shards; it has
/// been removed.
#[tracing::instrument(level = "debug")]
pub fn get_all_content(&self) -> BTreeMap<InfoHash, TorrentEntry> {
    let mut torrents_return = BTreeMap::new();
    for shard in &self.shards {
        let shard_data = shard.read();
        torrents_return.extend(shard_data.iter().map(|(k, v)| (*k, v.clone())));
    }
    torrents_return
}
/// Total number of torrents across all 256 shards (each shard read-locked
/// briefly in turn).
#[tracing::instrument(level = "debug")]
pub fn get_torrents_amount(&self) -> u64 {
    let mut total = 0u64;
    for shard in &self.shards {
        total += shard.read().len() as u64;
    }
    total
}
/// Look up many torrents at once; each requested hash maps to `Some(entry)`
/// or `None` when absent. Hashes are bucketed by shard first so every shard
/// lock is taken at most once.
pub fn get_multiple_torrents(&self, info_hashes: &[InfoHash]) -> BTreeMap<InfoHash, Option<TorrentEntry>> {
    let mut buckets: [Vec<InfoHash>; 256] = std::array::from_fn(|_| Vec::new());
    for &hash in info_hashes {
        buckets[hash.0[0] as usize].push(hash);
    }
    let mut results = BTreeMap::new();
    for (shard_index, bucket) in buckets.iter().enumerate() {
        if bucket.is_empty() {
            continue;
        }
        let guard = self.shards[shard_index].read();
        for &hash in bucket {
            results.insert(hash, guard.get(&hash).cloned());
        }
    }
    results
}
/// Answer many (torrent, peer) membership queries in one pass; the result
/// Vec is positionally aligned with `queries`. Query indices are bucketed by
/// shard so each shard lock is taken at most once.
pub fn batch_contains_peers(&self, queries: &[(InfoHash, PeerId)]) -> Vec<bool> {
    let mut results = vec![false; queries.len()];
    let mut buckets: [Vec<usize>; 256] = std::array::from_fn(|_| Vec::new());
    for (idx, &(info_hash, _)) in queries.iter().enumerate() {
        buckets[info_hash.0[0] as usize].push(idx);
    }
    for (shard_index, bucket) in buckets.iter().enumerate() {
        if bucket.is_empty() {
            continue;
        }
        let guard = self.shards[shard_index].read();
        for &idx in bucket {
            let (info_hash, peer_id) = queries[idx];
            results[idx] = guard
                .get(&info_hash)
                .is_some_and(|entry| entry.seeds.contains_key(&peer_id) || entry.peers.contains_key(&peer_id));
        }
    }
    results
}
/// Invoke `f` on every (info_hash, entry) pair, shard by shard, holding each
/// shard's read lock only while that shard is visited.
pub fn iter_all_torrents<F>(&self, mut f: F)
where
    F: FnMut(&InfoHash, &TorrentEntry)
{
    for shard in &self.shards {
        let guard = shard.read();
        guard.iter().for_each(|(k, v)| f(k, v));
    }
}
/// Parallel variant of `iter_all_torrents`: rayon runs one task per shard,
/// each holding its shard's read lock while invoking `f` on every entry.
pub fn par_iter_all_torrents<F>(&self, f: F)
where
    F: Fn(&InfoHash, &TorrentEntry) + Sync + Send
{
    use rayon::prelude::*;
    self.shards.par_iter().for_each(|shard| {
        let guard = shard.read();
        guard.iter().for_each(|(k, v)| f(k, v));
    });
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_users.rs | src/tracker/impls/torrent_tracker_users.rs | use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
use crate::tracker::structs::user_entry_item::UserEntryItem;
use crate::tracker::structs::user_id::UserId;
impl TorrentTracker {
/// Load all users from persistent storage into memory.
/// A failed load is silently ignored here (no log on error), matching the
/// existing behavior.
#[tracing::instrument(level = "debug")]
pub async fn load_users(&self, tracker: Arc<TorrentTracker>)
{
    match self.sqlx.load_users(tracker).await {
        Ok(users) => info!("Loaded {users} users"),
        Err(_) => {}
    }
}
#[tracing::instrument(level = "debug")]
pub async fn save_users(&self, tracker: Arc<TorrentTracker>, users: BTreeMap<UserId, (UserEntryItem, UpdatesAction)>) -> Result<(), ()>
{
let users_len = users.len();
match self.sqlx.save_users(tracker, users).await {
Ok(_) => {
info!("[SYNC USERS] Synced {users_len} users");
Ok(())
}
Err(_) => {
error!("[SYNC USERS] Unable to sync {users_len} users");
Err(())
}
}
}
/// Insert or replace a user entry. Returns true only when the user was new,
/// in which case the Users gauge is incremented.
#[tracing::instrument(level = "debug")]
pub fn add_user(&self, user_id: UserId, user_entry_item: UserEntryItem) -> bool
{
    let mut lock = self.users.write();
    let is_new = lock.insert(user_id, user_entry_item).is_none();
    if is_new {
        self.update_stats(StatsEvent::Users, 1);
    }
    is_new
}
/// Mark `info_hash` as active for a user, stamped with the current unix time.
/// Returns false when the user does not exist.
#[tracing::instrument(level = "debug")]
pub fn add_user_active_torrent(&self, user_id: UserId, info_hash: InfoHash) -> bool
{
    let mut lock = self.users.write();
    if let Some(user) = lock.get_mut(&user_id) {
        let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
        user.torrents_active.insert(info_hash, timestamp);
        true
    } else {
        false
    }
}
/// Clone-on-read lookup of a single user under a recursive read lock.
#[tracing::instrument(level = "debug")]
pub fn get_user(&self, id: UserId) -> Option<UserEntryItem>
{
    self.users.read_recursive().get(&id).cloned()
}
/// Snapshot (full clone) of the in-memory user map.
#[tracing::instrument(level = "debug")]
pub fn get_users(&self) -> BTreeMap<UserId, UserEntryItem>
{
    self.users.read_recursive().clone()
}
/// Remove a user, returning the removed entry; the Users gauge is only
/// decremented when something was actually removed.
#[tracing::instrument(level = "debug")]
pub fn remove_user(&self, user_id: UserId) -> Option<UserEntryItem>
{
    self.users.write().remove(&user_id).map(|data| {
        self.update_stats(StatsEvent::Users, -1);
        data
    })
}
/// Drop `info_hash` from the user's active-torrent set.
/// True only when the user exists AND the torrent was listed as active.
#[tracing::instrument(level = "debug")]
pub fn remove_user_active_torrent(&self, user_id: UserId, info_hash: InfoHash) -> bool
{
    let mut lock = self.users.write();
    lock.get_mut(&user_id)
        .map(|user| user.torrents_active.remove(&info_hash).is_some())
        .unwrap_or(false)
}
/// Find the user whose stored key equals `key`. Linear scan over all users
/// (O(n)) under a recursive read lock.
#[tracing::instrument(level = "debug")]
pub fn check_user_key(&self, key: UserId) -> Option<UserId>
{
    let lock = self.users.read_recursive();
    lock.iter()
        .find(|(_, entry)| entry.key == key)
        .map(|(user_id, _)| *user_id)
}
/// Purge user active-torrent records older than `peer_timeout`.
///
/// Phase 1 collects stale (user, torrent) pairs under a read lock only;
/// phase 2 applies all removals under a single write lock.
#[tracing::instrument(level = "debug")]
pub fn clean_user_active_torrents(&self, peer_timeout: Duration)
{
    let current_time = SystemTime::now();
    // saturating_sub: a peer_timeout larger than the current unix timestamp
    // would otherwise underflow the u64 subtraction (panic in debug builds).
    let timeout_threshold = current_time
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs()
        .saturating_sub(peer_timeout.as_secs());
    let remove_active_torrents = {
        let lock = self.users.read_recursive();
        info!("[USERS] Scanning {} users for dead active torrents", lock.len());
        let mut to_remove = Vec::new();
        for (user_id, user_entry_item) in lock.iter() {
            for (info_hash, &updated) in &user_entry_item.torrents_active {
                if updated < timeout_threshold {
                    to_remove.push((*user_id, *info_hash));
                }
            }
        }
        to_remove
    };
    let torrents_cleaned = remove_active_torrents.len() as u64;
    if !remove_active_torrents.is_empty() {
        let mut lock = self.users.write();
        for (user_id, info_hash) in remove_active_torrents {
            if let Some(user) = lock.get_mut(&user_id) {
                user.torrents_active.remove(&info_hash);
            }
        }
    }
    info!("[USERS] Removed {torrents_cleaned} active torrents in users");
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_peers.rs | src/tracker/impls/torrent_tracker_peers.rs | use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use std::net::{IpAddr, SocketAddr};
use log::info;
use crate::common::structs::number_of_bytes::NumberOfBytes;
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::torrent_peers_type::TorrentPeersType;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::peer_id::PeerId;
use crate::tracker::structs::torrent_entry::TorrentEntry;
use crate::tracker::structs::torrent_peer::TorrentPeer;
use crate::tracker::structs::torrent_peers::TorrentPeers;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Collect up to `amount` seeds and leeches per address family for a
/// torrent, split into the four `TorrentPeers` maps. Families not requested
/// by `ip_type` stay empty; `self_ip`, when given, is excluded from results.
/// Returns `None` when the torrent is unknown.
#[tracing::instrument(level = "debug")]
pub fn get_torrent_peers(&self, info_hash: InfoHash, amount: usize, ip_type: TorrentPeersType, self_ip: Option<IpAddr>) -> Option<TorrentPeers>
{
    let data = self.get_torrent(info_hash)?;
    // `All` fills both families; `IPv4`/`IPv6` fill only their own.
    let fetch_v4 = !matches!(ip_type, TorrentPeersType::IPv6);
    let fetch_v6 = !matches!(ip_type, TorrentPeersType::IPv4);
    Some(TorrentPeers {
        seeds_ipv4: if fetch_v4 { self.get_peers(&data.seeds, TorrentPeersType::IPv4, self_ip, amount) } else { BTreeMap::new() },
        seeds_ipv6: if fetch_v6 { self.get_peers(&data.seeds, TorrentPeersType::IPv6, self_ip, amount) } else { BTreeMap::new() },
        peers_ipv4: if fetch_v4 { self.get_peers(&data.peers, TorrentPeersType::IPv4, self_ip, amount) } else { BTreeMap::new() },
        peers_ipv6: if fetch_v6 { self.get_peers(&data.peers, TorrentPeersType::IPv6, self_ip, amount) } else { BTreeMap::new() },
    })
}
/// Filter a peer map down to the requested address family, excluding
/// `self_ip` when given, and cap the result at `amount` entries
/// (`amount == 0` means unlimited).
#[tracing::instrument(level = "debug")]
pub fn get_peers(&self, peers: &BTreeMap<PeerId, TorrentPeer>, type_ip: TorrentPeersType, self_ip: Option<IpAddr>, amount: usize) -> BTreeMap<PeerId, TorrentPeer>
{
    let limit = if amount == 0 { usize::MAX } else { amount };
    peers.iter()
        .filter(|(_, torrent_peer)| {
            let addr = torrent_peer.peer_addr;
            let family_ok = match type_ip {
                // Callers request IPv4/IPv6 separately; `All` matches nothing here.
                TorrentPeersType::All => false,
                TorrentPeersType::IPv4 => addr.is_ipv4(),
                TorrentPeersType::IPv6 => addr.is_ipv6(),
            };
            family_ok && self_ip.map_or(true, |ip| ip != addr.ip())
        })
        .take(limit)
        .map(|(peer_id, torrent_peer)| (*peer_id, torrent_peer.clone()))
        .collect()
}
/// Insert (or re-insert) a peer into a torrent, creating the torrent entry on
/// first sight. Returns `(previous_entry, new_entry)`; `previous_entry` is
/// `None` when the torrent did not exist before this call.
///
/// A peer reporting `left == 0` is classified as a seed, otherwise as a
/// leech; the Seeds/Peers/Torrents/Completed gauges are adjusted to match.
#[tracing::instrument(level = "debug")]
pub fn add_torrent_peer(&self, info_hash: InfoHash, peer_id: PeerId, torrent_peer: TorrentPeer, completed: bool) -> (Option<TorrentEntry>, TorrentEntry)
{
    // Shard selected by first info-hash byte; always exists for a u8.
    let shard = self.torrents_sharding.get_shard(info_hash.0[0]).unwrap();
    let mut lock = shard.write();
    match lock.entry(info_hash) {
        Entry::Vacant(v) => {
            let mut torrent_entry = TorrentEntry {
                seeds: BTreeMap::new(),
                peers: BTreeMap::new(),
                completed: 0,
                updated: std::time::Instant::now()
            };
            // NOTE(review): for a brand-new torrent, Completed is only counted
            // when the peer is also a seed (left == 0), while the occupied
            // branch below counts `completed` regardless of `left` — confirm
            // this asymmetry is intentional.
            if completed && torrent_peer.left == NumberOfBytes(0) {
                self.update_stats(StatsEvent::Completed, 1);
                torrent_entry.completed = 1;
            }
            self.update_stats(StatsEvent::Torrents, 1);
            if torrent_peer.left == NumberOfBytes(0) {
                self.update_stats(StatsEvent::Seeds, 1);
                torrent_entry.seeds.insert(peer_id, torrent_peer);
            } else {
                self.update_stats(StatsEvent::Peers, 1);
                torrent_entry.peers.insert(peer_id, torrent_peer);
            }
            let entry_clone = torrent_entry.clone();
            v.insert(torrent_entry);
            (None, entry_clone)
        }
        Entry::Occupied(mut o) => {
            let previous_torrent = o.get().clone();
            let entry = o.get_mut();
            // Drop any stale classification before re-inserting the peer,
            // so seed<->leech transitions keep the gauges balanced.
            let was_seed = entry.seeds.remove(&peer_id).is_some();
            let was_peer = entry.peers.remove(&peer_id).is_some();
            if was_seed {
                self.update_stats(StatsEvent::Seeds, -1);
            }
            if was_peer {
                self.update_stats(StatsEvent::Peers, -1);
            }
            if completed {
                self.update_stats(StatsEvent::Completed, 1);
                entry.completed += 1;
            }
            if torrent_peer.left == NumberOfBytes(0) {
                self.update_stats(StatsEvent::Seeds, 1);
                entry.seeds.insert(peer_id, torrent_peer);
            } else {
                self.update_stats(StatsEvent::Peers, 1);
                entry.peers.insert(peer_id, torrent_peer);
            }
            entry.updated = std::time::Instant::now();
            (Some(previous_torrent), entry.clone())
        }
    }
}
/// Remove `peer_id` from a torrent's seed/leech maps.
///
/// Returns `(entry_before, entry_after)`; `entry_after` is `None` when the
/// now-empty torrent itself was deleted (non-persistent mode). `(None, None)`
/// means the peer was not present at all.
#[tracing::instrument(level = "debug")]
pub fn remove_torrent_peer(&self, info_hash: InfoHash, peer_id: PeerId, persistent: bool, cleanup: bool) -> (Option<TorrentEntry>, Option<TorrentEntry>)
{
    // Cheap pre-check avoids taking the shard write lock for unknown peers.
    // NOTE(review): the peer can disappear between this check and the write
    // lock below; the `remove(...).is_some()` handling tolerates that race.
    if !self.torrents_sharding.contains_peer(info_hash, peer_id) {
        return (None, None);
    }
    let shard = self.torrents_sharding.get_shard(info_hash.0[0]).unwrap();
    let mut lock = shard.write();
    match lock.entry(info_hash) {
        Entry::Vacant(_) => (None, None),
        Entry::Occupied(mut o) => {
            if cleanup {
                info!("[PEERS] Removing from torrent {info_hash} peer {peer_id}");
            }
            let previous_torrent = o.get().clone();
            let entry = o.get_mut();
            let was_seed = entry.seeds.remove(&peer_id).is_some();
            let was_peer = entry.peers.remove(&peer_id).is_some();
            if was_seed {
                self.update_stats(StatsEvent::Seeds, -1);
            }
            if was_peer {
                self.update_stats(StatsEvent::Peers, -1);
            }
            // Drop the torrent entirely once empty, unless persistence is on.
            if !persistent && entry.seeds.is_empty() && entry.peers.is_empty() {
                o.remove();
                self.update_stats(StatsEvent::Torrents, -1);
                (Some(previous_torrent), None)
            } else {
                (Some(previous_torrent), Some(entry.clone()))
            }
        }
    }
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_peer.rs | src/tracker/impls/torrent_peer.rs | use std::net::{IpAddr, SocketAddr};
use crate::tracker::structs::torrent_peer::TorrentPeer;
impl TorrentPeer {
pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, port: u16) -> SocketAddr {
SocketAddr::new(remote_ip, port)
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/peer_id.rs | src/tracker/impls/peer_id.rs | use std::fmt;
use std::fmt::Formatter;
use serde::Serialize;
use crate::common::common::bin2hex;
use crate::tracker::structs::peer_id::PeerId;
impl fmt::Display for PeerId {
    // Renders the raw 20 id bytes via the shared `bin2hex` helper
    // (presumably hex output — see common::bin2hex for the exact format).
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        bin2hex(&self.0, f)
    }
}
impl PeerId {
    /// Maps the peer-id prefix to a human-readable BitTorrent client name.
    ///
    /// Recognizes the mainline convention (first byte `M` => "BitTorrent")
    /// and the Azureus-style convention (leading `-` with a two-letter client
    /// code in bytes 1..3). Returns `None` for unknown codes or other styles.
    pub fn get_client_name(&self) -> Option<&'static str> {
        if self.0[0] == b'M' {
            return Some("BitTorrent");
        }
        if self.0[0] == b'-' {
            // Azureus-style: bytes 1..3 hold the client code.
            let name = match &self.0[1..3] {
                b"AG" => "Ares",
                b"A~" => "Ares",
                b"AR" => "Arctic",
                b"AV" => "Avicora",
                b"AX" => "BitPump",
                b"AZ" => "Azureus",
                b"BB" => "BitBuddy",
                b"BC" => "BitComet",
                b"BF" => "Bitflu",
                b"BG" => "BTG (uses Rasterbar libtorrent)",
                b"BR" => "BitRocket",
                b"BS" => "BTSlave",
                b"BX" => "~Bittorrent X",
                b"CD" => "Enhanced CTorrent",
                b"CT" => "CTorrent",
                b"DE" => "DelugeTorrent",
                b"DP" => "Propagate Data Client",
                b"EB" => "EBit",
                b"ES" => "electric sheep",
                b"FT" => "FoxTorrent",
                b"FW" => "FrostWire",
                b"FX" => "Freebox BitTorrent",
                b"GS" => "GSTorrent",
                b"HL" => "Halite",
                b"HN" => "Hydranode",
                b"KG" => "KGet",
                b"KT" => "KTorrent",
                b"LH" => "LH-ABC",
                b"LP" => "Lphant",
                b"LT" => "libtorrent",
                b"lt" => "libTorrent",
                b"LW" => "LimeWire",
                b"MO" => "MonoTorrent",
                b"MP" => "MooPolice",
                b"MR" => "Miro",
                b"MT" => "MoonlightTorrent",
                b"NX" => "Net Transport",
                b"PD" => "Pando",
                b"PI" => "PicoTorrent",
                b"qB" => "qBittorrent",
                b"QD" => "QQDownload",
                b"QT" => "Qt 4 Torrent example",
                b"RT" => "Retriever",
                b"S~" => "Shareaza alpha/beta",
                b"SB" => "~Swiftbit",
                b"SS" => "SwarmScope",
                b"ST" => "SymTorrent",
                b"st" => "sharktorrent",
                b"SZ" => "Shareaza",
                b"TN" => "TorrentDotNET",
                b"TR" => "Transmission",
                b"TS" => "Torrentstorm",
                b"TT" => "TuoTu",
                b"UL" => "uLeecher!",
                b"UT" => "µTorrent",
                b"UW" => "µTorrent Web",
                b"VG" => "Vagaa",
                b"WD" => "WebTorrent Desktop",
                b"WT" => "BitLet",
                b"WW" => "WebTorrent",
                b"WY" => "FireTorrent",
                b"XL" => "Xunlei",
                b"XT" => "XanTorrent",
                b"XX" => "Xtorrent",
                b"ZT" => "ZipTorrent",
                _ => return None,
            };
            Some(name)
        } else {
            None
        }
    }
}
impl Serialize for PeerId {
    /// Serializes as an object: `{ "id": "<40-char hex>", "client": <name or null> }`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
        let mut buffer = [0u8; 40];
        for (i, &byte) in self.0.iter().enumerate() {
            buffer[i * 2] = HEX_CHARS[(byte >> 4) as usize];
            buffer[i * 2 + 1] = HEX_CHARS[(byte & 0xf) as usize];
        }
        // The buffer holds only ASCII hex digits, so UTF-8 validation cannot
        // fail; the checked conversion replaces the previous
        // `from_utf8_unchecked` and removes unnecessary `unsafe` at no cost.
        let id = std::str::from_utf8(&buffer)
            .expect("hex buffer is always valid ASCII");
        #[derive(Serialize)]
        struct PeerIdInfo<'a> {
            id: &'a str,
            client: Option<&'a str>,
        }
        let obj = PeerIdInfo {
            id,
            client: self.get_client_name(),
        };
        obj.serialize(serializer)
    }
}
impl std::str::FromStr for PeerId {
    type Err = binascii::ConvertError;

    /// Parses a 40-character hex string into the 20-byte peer id.
    /// Errors: `InvalidInputLength` for wrong length, `InvalidInput` for any
    /// non-hex character.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.len() != 40 {
            return Err(binascii::ConvertError::InvalidInputLength);
        }
        let mut out = [0u8; 20];
        // Walk the string two hex digits at a time, one output byte each.
        for (byte, pair) in out.iter_mut().zip(s.as_bytes().chunks_exact(2)) {
            let high = hex_char_to_nibble(pair[0]);
            let low = hex_char_to_nibble(pair[1]);
            if high == 0xFF || low == 0xFF {
                return Err(binascii::ConvertError::InvalidInput);
            }
            *byte = (high << 4) | low;
        }
        Ok(PeerId(out))
    }
}
impl From<&[u8]> for PeerId {
    /// Copies exactly 20 bytes into a new `PeerId`.
    /// Panics (assert) when `data` has any other length.
    fn from(data: &[u8]) -> PeerId {
        assert_eq!(data.len(), 20);
        let mut bytes = [0u8; 20];
        bytes.copy_from_slice(data);
        PeerId(bytes)
    }
}
impl<'de> serde::de::Deserialize<'de> for PeerId {
    /// Deserializes a `PeerId` from its 40-character hex string form
    /// (the inverse of the hex `id` produced by the `Serialize` impl).
    fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> {
        struct PeerIdVisitor;
        impl<'de> serde::de::Visitor<'de> for PeerIdVisitor {
            type Value = PeerId;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a 40 character long hash")
            }
            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                // Length is checked first so the nibble loop can index freely.
                if v.len() != 40 {
                    return Err(serde::de::Error::invalid_value(
                        serde::de::Unexpected::Str(v),
                        &"expected a 40 character long string",
                    ));
                }
                let mut res = PeerId([0u8; 20]);
                let bytes = v.as_bytes();
                for i in 0..20 {
                    // 0xFF is the sentinel hex_char_to_nibble returns for
                    // non-hex bytes.
                    let high = hex_char_to_nibble(bytes[i * 2]);
                    let low = hex_char_to_nibble(bytes[i * 2 + 1]);
                    if high == 0xFF || low == 0xFF {
                        return Err(serde::de::Error::invalid_value(
                            serde::de::Unexpected::Str(v),
                            &"expected a hexadecimal string",
                        ));
                    }
                    res.0[i] = (high << 4) | low;
                }
                Ok(res)
            }
        }
        des.deserialize_str(PeerIdVisitor)
    }
}
/// Decodes one ASCII hex digit (either case) into its 0..=15 value,
/// returning 0xFF as the sentinel for any non-hex byte.
#[inline(always)]
fn hex_char_to_nibble(c: u8) -> u8 {
    if c.is_ascii_digit() {
        c - b'0'
    } else if (b'a'..=b'f').contains(&c) {
        c - b'a' + 10
    } else if (b'A'..=b'F').contains(&c) {
        c - b'A' + 10
    } else {
        0xFF
    }
}
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_entry.rs | src/tracker/impls/torrent_entry.rs | use std::collections::BTreeMap;
use crate::tracker::structs::torrent_entry::TorrentEntry;
impl TorrentEntry {
    /// Creates an empty entry: no seeds, no peers, zero completions,
    /// `updated` stamped with the current instant.
    pub fn new() -> TorrentEntry {
        TorrentEntry {
            seeds: BTreeMap::new(),
            peers: BTreeMap::new(),
            completed: 0,
            updated: std::time::Instant::now(),
        }
    }
}
impl Default for TorrentEntry {
    // `Default` mirrors `new()`: an empty entry stamped with the current time.
    fn default() -> Self {
        Self::new()
    }
}
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_keys_updates.rs | src/tracker/impls/torrent_tracker_keys_updates.rs | use std::collections::{BTreeMap, HashMap};
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::time::SystemTime;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
/// Queue a key change, keyed by a nanosecond unix timestamp.
/// Returns true when a new queue entry was created (and the KeyUpdates gauge
/// was incremented); false when the timestamp collided with an existing one.
#[tracing::instrument(level = "debug")]
pub fn add_key_update(&self, info_hash: InfoHash, timeout: i64, updates_action: UpdatesAction) -> bool
{
    let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
    let mut lock = self.keys_updates.write();
    let inserted = lock.insert(timestamp, (info_hash, timeout, updates_action)).is_none();
    if inserted {
        self.update_stats(StatsEvent::KeyUpdates, 1);
    }
    inserted
}
/// Snapshot (full clone) of the pending key-update queue.
#[tracing::instrument(level = "debug")]
pub fn get_key_updates(&self) -> HashMap<u128, (InfoHash, i64, UpdatesAction)>
{
    self.keys_updates.read_recursive().clone()
}
/// Drop one pending key update by timestamp; the KeyUpdates gauge is only
/// decremented when something was actually removed.
#[tracing::instrument(level = "debug")]
pub fn remove_key_update(&self, timestamp: &u128) -> bool
{
    let mut lock = self.keys_updates.write();
    match lock.remove(timestamp) {
        Some(_) => {
            self.update_stats(StatsEvent::KeyUpdates, -1);
            true
        }
        None => false,
    }
}
/// Drop every queued key update and reset the KeyUpdates gauge to zero.
#[tracing::instrument(level = "debug")]
pub fn clear_key_updates(&self)
{
    let mut pending = self.keys_updates.write();
    pending.clear();
    self.set_stats(StatsEvent::KeyUpdates, 0);
}
#[tracing::instrument(level = "debug")]
pub async fn save_key_updates(&self, torrent_tracker: Arc<TorrentTracker>) -> Result<(), ()>
{
let updates = self.get_key_updates();
let mut mapping: HashMap<InfoHash, (u128, i64, UpdatesAction)> = HashMap::new();
let mut timestamps_to_remove = Vec::new();
for (timestamp, (info_hash, timeout, updates_action)) in updates {
match mapping.entry(info_hash) {
Entry::Occupied(mut o) => {
let existing = o.get();
if timestamp > existing.0 {
timestamps_to_remove.push(existing.0);
o.insert((timestamp, timeout, updates_action));
} else {
timestamps_to_remove.push(timestamp);
}
}
Entry::Vacant(v) => {
v.insert((timestamp, timeout, updates_action));
}
}
}
let keys_to_save: BTreeMap<InfoHash, (i64, UpdatesAction)> = mapping
.iter()
.map(|(info_hash, (_, timeout, updates_action))| (*info_hash, (*timeout, *updates_action)))
.collect();
match self.save_keys(torrent_tracker, keys_to_save).await {
Ok(_) => {
info!("[SYNC KEY UPDATES] Synced {} keys", mapping.len());
for (_, (timestamp, _, _)) in mapping {
self.remove_key_update(×tamp);
}
for timestamp in timestamps_to_remove {
self.remove_key_update(×tamp);
}
Ok(())
}
Err(_) => {
error!("[SYNC KEY UPDATES] Unable to sync {} keys", mapping.len());
Err(())
}
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_keys.rs | src/tracker/impls/torrent_tracker_keys.rs | use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use chrono::{TimeZone, Utc};
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub async fn load_keys(&self, tracker: Arc<TorrentTracker>)
{
if let Ok(keys) = self.sqlx.load_keys(tracker).await {
info!("Loaded {keys} keys");
}
}
#[tracing::instrument(level = "debug")]
pub async fn save_keys(&self, tracker: Arc<TorrentTracker>, keys: BTreeMap<InfoHash, (i64, UpdatesAction)>) -> Result<(), ()>
{
match self.sqlx.save_keys(tracker, keys).await {
Ok(keys_count) => {
info!("[SYNC KEYS] Synced {keys_count} keys");
Ok(())
}
Err(_) => {
error!("[SYNC KEYS] Unable to sync keys");
Err(())
}
}
}
#[tracing::instrument(level = "debug")]
pub fn add_key(&self, hash: InfoHash, timeout: i64) -> bool
{
let mut lock = self.keys.write();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let timeout_unix = timestamp.as_secs() as i64 + timeout;
match lock.entry(hash) {
Entry::Vacant(v) => {
self.update_stats(StatsEvent::Key, 1);
v.insert(timeout_unix);
true
}
Entry::Occupied(mut o) => {
o.insert(timeout_unix);
false
}
}
}
#[tracing::instrument(level = "debug")]
pub fn get_key(&self, hash: InfoHash) -> Option<(InfoHash, i64)>
{
let lock = self.keys.read_recursive();
lock.get(&hash).map(|&data| (hash, data))
}
#[tracing::instrument(level = "debug")]
pub fn get_keys(&self) -> BTreeMap<InfoHash, i64>
{
let lock = self.keys.read_recursive();
lock.clone()
}
#[tracing::instrument(level = "debug")]
pub fn remove_key(&self, hash: InfoHash) -> bool
{
let mut lock = self.keys.write();
if lock.remove(&hash).is_some() {
self.update_stats(StatsEvent::Key, -1);
true
} else {
false
}
}
#[tracing::instrument(level = "debug")]
pub fn check_key(&self, hash: InfoHash) -> bool
{
let lock = self.keys.read_recursive();
lock.get(&hash).is_some_and(|&key| {
let key_time = Utc.timestamp_opt(key, 0)
.single()
.map(SystemTime::from)
.unwrap_or(UNIX_EPOCH);
key_time > SystemTime::now()
})
}
#[tracing::instrument(level = "debug")]
pub fn clear_keys(&self)
{
let mut lock = self.keys.write();
lock.clear();
self.set_stats(StatsEvent::Key, 0);
}
#[tracing::instrument(level = "debug")]
pub fn clean_keys(&self)
{
let now = SystemTime::now();
let mut keys_to_remove = Vec::new();
{
let lock = self.keys.read_recursive();
for (&hash, &key_time) in lock.iter() {
let time = Utc.timestamp_opt(key_time, 0)
.single()
.map(SystemTime::from)
.unwrap_or(UNIX_EPOCH);
if time <= now {
keys_to_remove.push(hash);
}
}
}
for hash in keys_to_remove {
self.remove_key(hash);
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/torrent_tracker_torrents_updates.rs | src/tracker/impls/torrent_tracker_torrents_updates.rs | use std::collections::{BTreeMap, HashMap};
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::time::SystemTime;
use log::{error, info};
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_entry::TorrentEntry;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub fn add_torrent_update(&self, info_hash: InfoHash, torrent_entry: TorrentEntry, updates_action: UpdatesAction) -> bool
{
let mut lock = self.torrents_updates.write();
let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
if lock.insert(timestamp, (info_hash, torrent_entry, updates_action)).is_none() {
self.update_stats(StatsEvent::TorrentsUpdates, 1);
true
} else {
false
}
}
#[tracing::instrument(level = "debug")]
pub fn add_torrent_updates(&self, hashes: HashMap<u128, (InfoHash, TorrentEntry, UpdatesAction)>) -> BTreeMap<InfoHash, bool>
{
let mut lock = self.torrents_updates.write();
let mut returned_data = BTreeMap::new();
let mut success_count = 0i64;
let mut remove_count = 0i64;
for (timestamp, (info_hash, torrent_entry, updates_action)) in hashes {
let new_timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos();
let success = lock.insert(new_timestamp, (info_hash, torrent_entry, updates_action)).is_none();
if success {
success_count += 1;
}
returned_data.insert(info_hash, success);
if lock.remove(×tamp).is_some() {
remove_count += 1;
}
}
if success_count > 0 {
self.update_stats(StatsEvent::TorrentsUpdates, success_count);
}
if remove_count > 0 {
self.update_stats(StatsEvent::TorrentsUpdates, -remove_count);
}
returned_data
}
#[tracing::instrument(level = "debug")]
pub fn get_torrent_updates(&self) -> HashMap<u128, (InfoHash, TorrentEntry, UpdatesAction)>
{
let lock = self.torrents_updates.read_recursive();
lock.clone()
}
#[tracing::instrument(level = "debug")]
pub fn remove_torrent_update(&self, timestamp: &u128) -> bool
{
let mut lock = self.torrents_updates.write();
if lock.remove(timestamp).is_some() {
self.update_stats(StatsEvent::TorrentsUpdates, -1);
true
} else {
false
}
}
#[tracing::instrument(level = "debug")]
pub fn clear_torrent_updates(&self)
{
let mut lock = self.torrents_updates.write();
lock.clear();
self.set_stats(StatsEvent::TorrentsUpdates, 0);
}
#[tracing::instrument(level = "debug")]
pub async fn save_torrent_updates(&self, torrent_tracker: Arc<TorrentTracker>) -> Result<(), ()>
{
let updates = {
let lock = self.torrents_updates.read_recursive();
lock.clone()
};
if updates.is_empty() {
return Ok(());
}
let mut mapping: HashMap<InfoHash, (u128, TorrentEntry, UpdatesAction)> = HashMap::with_capacity(updates.len());
let mut timestamps_to_remove = Vec::new();
for (timestamp, (info_hash, torrent_entry, updates_action)) in updates {
match mapping.entry(info_hash) {
Entry::Occupied(mut o) => {
let existing = o.get();
if timestamp > existing.0 {
timestamps_to_remove.push(existing.0);
o.insert((timestamp, torrent_entry, updates_action));
} else {
timestamps_to_remove.push(timestamp);
}
}
Entry::Vacant(v) => {
v.insert((timestamp, torrent_entry, updates_action));
}
}
}
let mapping_len = mapping.len();
let torrents_to_save: BTreeMap<InfoHash, (TorrentEntry, UpdatesAction)> = mapping
.iter()
.map(|(info_hash, (_, torrent_entry, updates_action))| (*info_hash, (torrent_entry.clone(), *updates_action)))
.collect();
match self.save_torrents(torrent_tracker, torrents_to_save).await {
Ok(_) => {
info!("[SYNC TORRENT UPDATES] Synced {mapping_len} torrents");
let mut lock = self.torrents_updates.write();
let mut removed_count = 0i64;
for (_, (timestamp, _, _)) in mapping {
if lock.remove(×tamp).is_some() {
removed_count += 1;
}
}
for timestamp in timestamps_to_remove {
if lock.remove(×tamp).is_some() {
removed_count += 1;
}
}
if removed_count > 0 {
self.update_stats(StatsEvent::TorrentsUpdates, -removed_count);
}
Ok(())
}
Err(_) => {
error!("[SYNC TORRENT UPDATES] Unable to sync {mapping_len} torrents");
Err(())
}
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/impls/announce_event.rs | src/tracker/impls/announce_event.rs | use crate::tracker::enums::announce_event::AnnounceEvent;
impl AnnounceEvent {
#[inline]
pub const fn from_i32(i: i32) -> Self {
match i {
1 => Self::Completed,
2 => Self::Started,
3 => Self::Stopped,
_ => Self::None,
}
}
#[inline]
pub const fn to_i32(self) -> i32 {
match self {
AnnounceEvent::None => 0,
AnnounceEvent::Completed => 1,
AnnounceEvent::Started => 2,
AnnounceEvent::Stopped => 3,
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/types/users_updates.rs | src/tracker/types/users_updates.rs | use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::user_entry_item::UserEntryItem;
use crate::tracker::structs::user_id::UserId;
pub type UsersUpdates = Arc<RwLock<HashMap<u128, (UserId, UserEntryItem, UpdatesAction)>>>; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/types/keys_updates.rs | src/tracker/types/keys_updates.rs | use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
pub type KeysUpdates = Arc<RwLock<HashMap<u128, (InfoHash, i64, UpdatesAction)>>>; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/types/torrents_updates.rs | src/tracker/types/torrents_updates.rs | use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_entry::TorrentEntry;
pub type TorrentsUpdates = Arc<RwLock<HashMap<u128, (InfoHash, TorrentEntry, UpdatesAction)>>>; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/torrent_tracker.rs | src/tracker/structs/torrent_tracker.rs | use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use parking_lot::RwLock;
use crate::config::structs::configuration::Configuration;
use crate::database::structs::database_connector::DatabaseConnector;
use crate::stats::structs::stats_atomics::StatsAtomics;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_sharding::TorrentSharding;
use crate::tracker::structs::user_entry_item::UserEntryItem;
use crate::tracker::structs::user_id::UserId;
use crate::tracker::types::keys_updates::KeysUpdates;
use crate::tracker::types::torrents_updates::TorrentsUpdates;
use crate::tracker::types::users_updates::UsersUpdates;
#[derive(Debug)]
pub struct TorrentTracker {
pub config: Arc<Configuration>,
pub sqlx: DatabaseConnector,
pub torrents_sharding: Arc<TorrentSharding>,
pub torrents_updates: TorrentsUpdates,
pub torrents_whitelist: Arc<RwLock<Vec<InfoHash>>>,
pub torrents_whitelist_updates: Arc<RwLock<HashMap<u128, (InfoHash, UpdatesAction)>>>,
pub torrents_blacklist: Arc<RwLock<Vec<InfoHash>>>,
pub torrents_blacklist_updates: Arc<RwLock<HashMap<u128, (InfoHash, UpdatesAction)>>>,
pub keys: Arc<RwLock<BTreeMap<InfoHash, i64>>>,
pub keys_updates: KeysUpdates,
pub users: Arc<RwLock<BTreeMap<UserId, UserEntryItem>>>,
pub users_updates: UsersUpdates,
pub stats: Arc<StatsAtomics>
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/user_id.rs | src/tracker/structs/user_id.rs | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
pub struct UserId(pub [u8; 20]); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/info_hash.rs | src/tracker/structs/info_hash.rs | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
pub struct InfoHash(pub [u8; 20]); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/padded_atomic_u64.rs | src/tracker/structs/padded_atomic_u64.rs | use std::sync::atomic::AtomicU64;
use crate::tracker::impls::torrent_sharding::CACHE_LINE_SIZE;
#[repr(align(64))]
pub struct PaddedAtomicU64 {
pub value: AtomicU64,
pub _padding: [u8; CACHE_LINE_SIZE - std::mem::size_of::<AtomicU64>()],
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/torrent_sharding.rs | src/tracker/structs/torrent_sharding.rs | use std::collections::BTreeMap;
use std::sync::Arc;
use parking_lot::RwLock;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_entry::TorrentEntry;
#[derive(Debug)]
pub struct TorrentSharding {
pub shards: [Arc<RwLock<BTreeMap<InfoHash, TorrentEntry>>>; 256],
}
| rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/torrent_peer.rs | src/tracker/structs/torrent_peer.rs | use std::net::SocketAddr;
use serde::Serialize;
use crate::common::structs::number_of_bytes::NumberOfBytes;
use crate::common::structs::number_of_bytes_def::NumberOfBytesDef;
use crate::tracker::enums::announce_event::AnnounceEvent;
use crate::tracker::enums::announce_event_def::AnnounceEventDef;
use crate::tracker::structs::peer_id::PeerId;
#[derive(PartialEq, Eq, Debug, Clone, Serialize)]
pub struct TorrentPeer {
pub peer_id: PeerId,
pub peer_addr: SocketAddr,
#[serde(with = "serde_millis")]
pub updated: std::time::Instant,
#[serde(with = "NumberOfBytesDef")]
pub uploaded: NumberOfBytes,
#[serde(with = "NumberOfBytesDef")]
pub downloaded: NumberOfBytes,
#[serde(with = "NumberOfBytesDef")]
pub left: NumberOfBytes,
#[serde(with = "AnnounceEventDef")]
pub event: AnnounceEvent,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/peer_id.rs | src/tracker/structs/peer_id.rs | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, PartialOrd, Ord)]
pub struct PeerId(pub [u8; 20]); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/cleanup_stats_atomic.rs | src/tracker/structs/cleanup_stats_atomic.rs | use crate::tracker::structs::padded_atomic_u64::PaddedAtomicU64;
pub struct CleanupStatsAtomic {
pub torrents: PaddedAtomicU64,
pub seeds: PaddedAtomicU64,
pub peers: PaddedAtomicU64,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/announce_query_request.rs | src/tracker/structs/announce_query_request.rs | use std::net::IpAddr;
use serde::Deserialize;
use crate::tracker::enums::announce_event::AnnounceEvent;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::peer_id::PeerId;
#[derive(Deserialize, Clone, Debug)]
#[allow(dead_code)]
pub struct AnnounceQueryRequest {
pub(crate) info_hash: InfoHash,
pub(crate) peer_id: PeerId,
pub(crate) port: u16,
pub(crate) uploaded: u64,
pub(crate) downloaded: u64,
pub(crate) left: u64,
pub(crate) compact: bool,
pub(crate) no_peer_id: bool,
pub(crate) event: AnnounceEvent,
pub(crate) remote_addr: IpAddr,
pub(crate) numwant: u64,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/torrent_entry.rs | src/tracker/structs/torrent_entry.rs | use std::collections::BTreeMap;
use serde::Serialize;
use crate::tracker::structs::peer_id::PeerId;
use crate::tracker::structs::torrent_peer::TorrentPeer;
#[derive(Serialize, Clone, Debug)]
pub struct TorrentEntry {
#[serde(skip_serializing)]
pub seeds: BTreeMap<PeerId, TorrentPeer>,
#[serde(skip_serializing)]
pub peers: BTreeMap<PeerId, TorrentPeer>,
pub completed: u64,
#[serde(with = "serde_millis")]
pub updated: std::time::Instant
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/scrape_query_request.rs | src/tracker/structs/scrape_query_request.rs | use serde::Deserialize;
use crate::tracker::structs::info_hash::InfoHash;
#[derive(Deserialize, Clone, Debug)]
#[allow(dead_code)]
pub struct ScrapeQueryRequest {
pub(crate) info_hash: Vec<InfoHash>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/torrent_peers.rs | src/tracker/structs/torrent_peers.rs | use std::collections::BTreeMap;
use serde::Serialize;
use crate::tracker::structs::peer_id::PeerId;
use crate::tracker::structs::torrent_peer::TorrentPeer;
#[derive(Serialize, Debug)]
pub struct TorrentPeers {
pub(crate) seeds_ipv4: BTreeMap<PeerId, TorrentPeer>,
pub(crate) seeds_ipv6: BTreeMap<PeerId, TorrentPeer>,
pub(crate) peers_ipv4: BTreeMap<PeerId, TorrentPeer>,
pub(crate) peers_ipv6: BTreeMap<PeerId, TorrentPeer>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/structs/user_entry_item.rs | src/tracker/structs/user_entry_item.rs | use std::collections::BTreeMap;
use serde::{Deserialize, Serialize};
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::user_id::UserId;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct UserEntryItem {
pub key: UserId,
pub user_id: Option<u64>,
pub user_uuid: Option<String>,
pub uploaded: u64,
pub downloaded: u64,
pub completed: u64,
pub updated: u64,
pub active: u8,
pub torrents_active: BTreeMap<InfoHash, u64>
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/enums/updates_action.rs | src/tracker/enums/updates_action.rs | use serde::Deserialize;
#[derive(Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum UpdatesAction {
Add,
Remove,
Update,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/enums/announce_event_def.rs | src/tracker/enums/announce_event_def.rs | use serde::{Deserialize, Serialize};
use crate::tracker::enums::announce_event::AnnounceEvent;
#[derive(Serialize, Deserialize)]
#[serde(remote = "AnnounceEvent")]
pub enum AnnounceEventDef {
Started,
Stopped,
Completed,
None,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/enums/torrent_peers_type.rs | src/tracker/enums/torrent_peers_type.rs |
#[derive(Debug)]
pub enum TorrentPeersType {
All,
IPv4,
IPv6
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/tracker/enums/announce_event.rs | src/tracker/enums/announce_event.rs | use serde::Deserialize;
#[derive(Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum AnnounceEvent {
None = 0,
Completed = 1,
Started = 2,
Stopped = 3
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/tests.rs | src/stats/tests.rs | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false | |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/structs.rs | src/stats/structs.rs | pub mod stats;
pub mod stats_atomics; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/enums.rs | src/stats/enums.rs | pub mod stats_event; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/mod.rs | src/stats/mod.rs | pub mod enums;
pub mod impls;
pub mod structs;
pub mod tests; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/impls.rs | src/stats/impls.rs | pub mod torrent_tracker; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/impls/torrent_tracker.rs | src/stats/impls/torrent_tracker.rs | use std::sync::atomic::Ordering;
use crate::stats::enums::stats_event::StatsEvent;
use crate::stats::structs::stats::Stats;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
impl TorrentTracker {
#[tracing::instrument(level = "debug")]
pub fn get_stats(&self) -> Stats
{
Stats {
started: self.stats.started.load(Ordering::Relaxed),
timestamp_run_save: self.stats.timestamp_run_save.load(Ordering::Relaxed),
timestamp_run_timeout: self.stats.timestamp_run_timeout.load(Ordering::Relaxed),
timestamp_run_console: self.stats.timestamp_run_console.load(Ordering::Relaxed),
timestamp_run_keys_timeout: self.stats.timestamp_run_keys_timeout.load(Ordering::Relaxed),
torrents: self.stats.torrents.load(Ordering::Relaxed),
torrents_updates: self.stats.torrents_updates.load(Ordering::Relaxed),
users: self.stats.users.load(Ordering::Relaxed),
users_updates: self.stats.users_updates.load(Ordering::Relaxed),
seeds: self.stats.seeds.load(Ordering::Relaxed),
peers: self.stats.peers.load(Ordering::Relaxed),
completed: self.stats.completed.load(Ordering::Relaxed),
whitelist_enabled: self.stats.whitelist_enabled.load(Ordering::Relaxed),
whitelist: self.stats.whitelist.load(Ordering::Relaxed),
whitelist_updates: self.stats.whitelist_updates.load(Ordering::Relaxed),
blacklist_enabled: self.stats.blacklist_enabled.load(Ordering::Relaxed),
blacklist: self.stats.blacklist.load(Ordering::Relaxed),
blacklist_updates: self.stats.blacklist_updates.load(Ordering::Relaxed),
keys_enabled: self.stats.keys_enabled.load(Ordering::Relaxed),
keys: self.stats.keys.load(Ordering::Relaxed),
keys_updates: self.stats.keys_updates.load(Ordering::Relaxed),
tcp4_not_found: self.stats.tcp4_not_found.load(Ordering::Relaxed),
tcp4_failure: self.stats.tcp4_failure.load(Ordering::Relaxed),
tcp4_connections_handled: self.stats.tcp4_connections_handled.load(Ordering::Relaxed),
tcp4_api_handled: self.stats.tcp4_api_handled.load(Ordering::Relaxed),
tcp4_announces_handled: self.stats.tcp4_announces_handled.load(Ordering::Relaxed),
tcp4_scrapes_handled: self.stats.tcp4_scrapes_handled.load(Ordering::Relaxed),
tcp6_not_found: self.stats.tcp6_not_found.load(Ordering::Relaxed),
tcp6_failure: self.stats.tcp6_failure.load(Ordering::Relaxed),
tcp6_connections_handled: self.stats.tcp6_connections_handled.load(Ordering::Relaxed),
tcp6_api_handled: self.stats.tcp6_api_handled.load(Ordering::Relaxed),
tcp6_announces_handled: self.stats.tcp6_announces_handled.load(Ordering::Relaxed),
tcp6_scrapes_handled: self.stats.tcp6_scrapes_handled.load(Ordering::Relaxed),
udp4_bad_request: self.stats.udp4_bad_request.load(Ordering::Relaxed),
udp4_invalid_request: self.stats.udp4_invalid_request.load(Ordering::Relaxed),
udp4_connections_handled: self.stats.udp4_connections_handled.load(Ordering::Relaxed),
udp4_announces_handled: self.stats.udp4_announces_handled.load(Ordering::Relaxed),
udp4_scrapes_handled: self.stats.udp4_scrapes_handled.load(Ordering::Relaxed),
udp6_bad_request: self.stats.udp6_bad_request.load(Ordering::Relaxed),
udp6_invalid_request: self.stats.udp6_invalid_request.load(Ordering::Relaxed),
udp6_connections_handled: self.stats.udp6_connections_handled.load(Ordering::Relaxed),
udp6_announces_handled: self.stats.udp6_announces_handled.load(Ordering::Relaxed),
udp6_scrapes_handled: self.stats.udp6_scrapes_handled.load(Ordering::Relaxed),
udp_queue_len: self.stats.udp_queue_len.load(Ordering::Relaxed),
}
}
#[tracing::instrument(level = "debug")]
pub fn update_stats(&self, event: StatsEvent, value: i64) -> Stats
{
match event {
StatsEvent::Torrents => {
self.update_counter(&self.stats.torrents, value);
}
StatsEvent::TorrentsUpdates => {
self.update_counter(&self.stats.torrents_updates, value);
}
StatsEvent::Users => {
self.update_counter(&self.stats.users, value);
}
StatsEvent::UsersUpdates => {
self.update_counter(&self.stats.users_updates, value);
}
StatsEvent::TimestampSave => {
self.update_counter(&self.stats.timestamp_run_save, value);
}
StatsEvent::TimestampTimeout => {
self.update_counter(&self.stats.timestamp_run_timeout, value);
}
StatsEvent::TimestampConsole => {
self.update_counter(&self.stats.timestamp_run_console, value);
}
StatsEvent::TimestampKeysTimeout => {
self.update_counter(&self.stats.timestamp_run_keys_timeout, value);
}
StatsEvent::Seeds => {
self.update_counter(&self.stats.seeds, value);
}
StatsEvent::Peers => {
self.update_counter(&self.stats.peers, value);
}
StatsEvent::Completed => {
self.update_counter(&self.stats.completed, value);
}
StatsEvent::WhitelistEnabled => {
self.stats.whitelist_enabled.store(value > 0, Ordering::Release);
}
StatsEvent::Whitelist => {
self.update_counter(&self.stats.whitelist, value);
}
StatsEvent::WhitelistUpdates => {
self.update_counter(&self.stats.whitelist_updates, value);
}
StatsEvent::BlacklistEnabled => {
self.stats.blacklist_enabled.store(value > 0, Ordering::Release);
}
StatsEvent::Blacklist => {
self.update_counter(&self.stats.blacklist, value);
}
StatsEvent::BlacklistUpdates => {
self.update_counter(&self.stats.blacklist_updates, value);
}
StatsEvent::Key => {
self.update_counter(&self.stats.keys, value);
}
StatsEvent::KeyUpdates => {
self.update_counter(&self.stats.keys_updates, value);
}
StatsEvent::Tcp4NotFound => {
self.update_counter(&self.stats.tcp4_not_found, value);
}
StatsEvent::Tcp4Failure => {
self.update_counter(&self.stats.tcp4_failure, value);
}
StatsEvent::Tcp4ConnectionsHandled => {
self.update_counter(&self.stats.tcp4_connections_handled, value);
}
StatsEvent::Tcp4ApiHandled => {
self.update_counter(&self.stats.tcp4_api_handled, value);
}
StatsEvent::Tcp4AnnouncesHandled => {
self.update_counter(&self.stats.tcp4_announces_handled, value);
}
StatsEvent::Tcp4ScrapesHandled => {
self.update_counter(&self.stats.tcp4_scrapes_handled, value);
}
StatsEvent::Tcp6NotFound => {
self.update_counter(&self.stats.tcp6_not_found, value);
}
StatsEvent::Tcp6Failure => {
self.update_counter(&self.stats.tcp6_failure, value);
}
StatsEvent::Tcp6ConnectionsHandled => {
self.update_counter(&self.stats.tcp6_connections_handled, value);
}
StatsEvent::Tcp6ApiHandled => {
self.update_counter(&self.stats.tcp6_api_handled, value);
}
StatsEvent::Tcp6AnnouncesHandled => {
self.update_counter(&self.stats.tcp6_announces_handled, value);
}
StatsEvent::Tcp6ScrapesHandled => {
self.update_counter(&self.stats.tcp6_scrapes_handled, value);
}
StatsEvent::Udp4BadRequest => {
self.update_counter(&self.stats.udp4_bad_request, value);
}
StatsEvent::Udp4InvalidRequest => {
self.update_counter(&self.stats.udp4_invalid_request, value);
}
StatsEvent::Udp4ConnectionsHandled => {
self.update_counter(&self.stats.udp4_connections_handled, value);
}
StatsEvent::Udp4AnnouncesHandled => {
self.update_counter(&self.stats.udp4_announces_handled, value);
}
StatsEvent::Udp4ScrapesHandled => {
self.update_counter(&self.stats.udp4_scrapes_handled, value);
}
StatsEvent::Udp6BadRequest => {
self.update_counter(&self.stats.udp6_bad_request, value);
}
StatsEvent::Udp6InvalidRequest => {
self.update_counter(&self.stats.udp6_invalid_request, value);
}
StatsEvent::Udp6ConnectionsHandled => {
self.update_counter(&self.stats.udp6_connections_handled, value);
}
StatsEvent::Udp6AnnouncesHandled => {
self.update_counter(&self.stats.udp6_announces_handled, value);
}
StatsEvent::Udp6ScrapesHandled => {
self.update_counter(&self.stats.udp6_scrapes_handled, value);
}
StatsEvent::UdpQueueLen => {
self.stats.udp_queue_len.store(value, Ordering::Release);
}
};
self.get_stats()
}
#[tracing::instrument(level = "debug")]
pub fn set_stats(&self, event: StatsEvent, value: i64) -> Stats
{
match event {
StatsEvent::Torrents => {
self.stats.torrents.store(value, Ordering::Release);
}
StatsEvent::TorrentsUpdates => {
self.stats.torrents_updates.store(value, Ordering::Release);
}
StatsEvent::Users => {
self.stats.users.store(value, Ordering::Release);
}
StatsEvent::UsersUpdates => {
self.stats.users_updates.store(value, Ordering::Release);
}
StatsEvent::TimestampSave => {
self.stats.timestamp_run_save.store(value, Ordering::Release);
}
StatsEvent::TimestampTimeout => {
self.stats.timestamp_run_timeout.store(value, Ordering::Release);
}
StatsEvent::TimestampConsole => {
self.stats.timestamp_run_console.store(value, Ordering::Release);
}
StatsEvent::TimestampKeysTimeout => {
self.stats.timestamp_run_keys_timeout.store(value, Ordering::Release);
}
StatsEvent::Seeds => {
self.stats.seeds.store(value, Ordering::Release);
}
StatsEvent::Peers => {
self.stats.peers.store(value, Ordering::Release);
}
StatsEvent::Completed => {
self.stats.completed.store(value, Ordering::Release);
}
StatsEvent::WhitelistEnabled => {
self.stats.whitelist_enabled.store(value > 0, Ordering::Release);
}
StatsEvent::Whitelist => {
self.stats.whitelist.store(value, Ordering::Release);
}
StatsEvent::WhitelistUpdates => {
self.stats.whitelist_updates.store(value, Ordering::Release);
}
StatsEvent::BlacklistEnabled => {
self.stats.blacklist_enabled.store(value > 0, Ordering::Release);
}
StatsEvent::Blacklist => {
self.stats.blacklist.store(value, Ordering::Release);
}
StatsEvent::BlacklistUpdates => {
self.stats.blacklist_updates.store(value, Ordering::Release);
}
StatsEvent::Key => {
self.stats.keys.store(value, Ordering::Release);
}
StatsEvent::KeyUpdates => {
self.stats.keys_updates.store(value, Ordering::Release);
}
StatsEvent::Tcp4NotFound => {
self.stats.tcp4_not_found.store(value, Ordering::Release);
}
StatsEvent::Tcp4Failure => {
self.stats.tcp4_failure.store(value, Ordering::Release);
}
StatsEvent::Tcp4ConnectionsHandled => {
self.stats.tcp4_connections_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp4ApiHandled => {
self.stats.tcp4_api_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp4AnnouncesHandled => {
self.stats.tcp4_announces_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp4ScrapesHandled => {
self.stats.tcp4_scrapes_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp6NotFound => {
self.stats.tcp6_not_found.store(value, Ordering::Release);
}
StatsEvent::Tcp6Failure => {
self.stats.tcp6_failure.store(value, Ordering::Release);
}
StatsEvent::Tcp6ConnectionsHandled => {
self.stats.tcp6_connections_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp6ApiHandled => {
self.stats.tcp6_api_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp6AnnouncesHandled => {
self.stats.tcp6_announces_handled.store(value, Ordering::Release);
}
StatsEvent::Tcp6ScrapesHandled => {
self.stats.tcp6_scrapes_handled.store(value, Ordering::Release);
}
StatsEvent::Udp4BadRequest => {
self.stats.udp4_bad_request.store(value, Ordering::Release);
}
StatsEvent::Udp4InvalidRequest => {
self.stats.udp4_invalid_request.store(value, Ordering::Release);
}
StatsEvent::Udp4ConnectionsHandled => {
self.stats.udp4_connections_handled.store(value, Ordering::Release);
}
StatsEvent::Udp4AnnouncesHandled => {
self.stats.udp4_announces_handled.store(value, Ordering::Release);
}
StatsEvent::Udp4ScrapesHandled => {
self.stats.udp4_scrapes_handled.store(value, Ordering::Release);
}
StatsEvent::Udp6BadRequest => {
self.stats.udp6_bad_request.store(value, Ordering::Release);
}
StatsEvent::Udp6InvalidRequest => {
self.stats.udp6_invalid_request.store(value, Ordering::Release);
}
StatsEvent::Udp6ConnectionsHandled => {
self.stats.udp6_connections_handled.store(value, Ordering::Release);
}
StatsEvent::Udp6AnnouncesHandled => {
self.stats.udp6_announces_handled.store(value, Ordering::Release);
}
StatsEvent::Udp6ScrapesHandled => {
self.stats.udp6_scrapes_handled.store(value, Ordering::Release);
}
StatsEvent::UdpQueueLen => {
self.stats.udp_queue_len.store(value, Ordering::Release);
}
};
self.get_stats()
}
#[inline(always)]
fn update_counter(&self, counter: &std::sync::atomic::AtomicI64, value: i64) {
if value > 0 {
counter.fetch_add(value, Ordering::Release);
} else if value < 0 {
counter.fetch_sub(-value, Ordering::Release);
}
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/structs/stats.rs | src/stats/structs/stats.rs | use serde::{Deserialize, Serialize};
/// Plain-value snapshot of all tracker statistics, mirroring `StatsAtomics`
/// field-for-field so it can be serialized (e.g. as JSON for the stats API).
#[derive(Serialize, Deserialize, Clone, Copy)]
pub struct Stats {
    // Unix timestamp the tracker started, plus the last-run timestamps of the
    // periodic save / timeout / console / key-timeout maintenance tasks.
    pub started: i64,
    pub timestamp_run_save: i64,
    pub timestamp_run_timeout: i64,
    pub timestamp_run_console: i64,
    pub timestamp_run_keys_timeout: i64,
    // Core object counts and pending-update queue sizes.
    pub torrents: i64,
    pub torrents_updates: i64,
    pub users: i64,
    pub users_updates: i64,
    pub seeds: i64,
    pub peers: i64,
    pub completed: i64,
    // Whitelist / blacklist / keys subsystems: enabled flag, entry count and
    // pending-update count for each.
    pub whitelist_enabled: bool,
    pub whitelist: i64,
    pub whitelist_updates: i64,
    pub blacklist_enabled: bool,
    pub blacklist: i64,
    pub blacklist_updates: i64,
    pub keys_enabled: bool,
    pub keys: i64,
    pub keys_updates: i64,
    // Per-protocol request counters (TCP over IPv4/IPv6).
    pub tcp4_not_found: i64,
    pub tcp4_failure: i64,
    pub tcp4_connections_handled: i64,
    pub tcp4_api_handled: i64,
    pub tcp4_announces_handled: i64,
    pub tcp4_scrapes_handled: i64,
    pub tcp6_not_found: i64,
    pub tcp6_failure: i64,
    pub tcp6_connections_handled: i64,
    pub tcp6_api_handled: i64,
    pub tcp6_announces_handled: i64,
    pub tcp6_scrapes_handled: i64,
    // Per-protocol request counters (UDP over IPv4/IPv6).
    pub udp4_bad_request: i64,
    pub udp4_invalid_request: i64,
    pub udp4_connections_handled: i64,
    pub udp4_announces_handled: i64,
    pub udp4_scrapes_handled: i64,
    pub udp6_bad_request: i64,
    pub udp6_invalid_request: i64,
    pub udp6_connections_handled: i64,
    pub udp6_announces_handled: i64,
    pub udp6_scrapes_handled: i64,
    // Current length of the UDP work queue.
    pub udp_queue_len: i64,
}
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/structs/stats_atomics.rs | src/stats/structs/stats_atomics.rs | use std::sync::atomic::{AtomicBool, AtomicI64};
use serde::{Deserialize, Serialize};
/// Live tracker statistics held as atomics so handlers on many threads can
/// update them lock-free; snapshotted into the plain `Stats` struct when a
/// serializable copy is needed.
#[derive(Debug, Serialize, Deserialize)]
pub struct StatsAtomics {
    // Unix timestamp the tracker started, plus the last-run timestamps of the
    // periodic save / timeout / console / key-timeout maintenance tasks.
    pub started: AtomicI64,
    pub timestamp_run_save: AtomicI64,
    pub timestamp_run_timeout: AtomicI64,
    pub timestamp_run_console: AtomicI64,
    pub timestamp_run_keys_timeout: AtomicI64,
    // Core object counts and pending-update queue sizes.
    pub torrents: AtomicI64,
    pub torrents_updates: AtomicI64,
    pub users: AtomicI64,
    pub users_updates: AtomicI64,
    pub seeds: AtomicI64,
    pub peers: AtomicI64,
    pub completed: AtomicI64,
    // Whitelist / blacklist / keys subsystems: enabled flag, entry count and
    // pending-update count for each.
    pub whitelist_enabled: AtomicBool,
    pub whitelist: AtomicI64,
    pub whitelist_updates: AtomicI64,
    pub blacklist_enabled: AtomicBool,
    pub blacklist: AtomicI64,
    pub blacklist_updates: AtomicI64,
    pub keys_enabled: AtomicBool,
    pub keys: AtomicI64,
    pub keys_updates: AtomicI64,
    // Per-protocol request counters (TCP over IPv4/IPv6).
    pub tcp4_not_found: AtomicI64,
    pub tcp4_failure: AtomicI64,
    pub tcp4_connections_handled: AtomicI64,
    pub tcp4_api_handled: AtomicI64,
    pub tcp4_announces_handled: AtomicI64,
    pub tcp4_scrapes_handled: AtomicI64,
    pub tcp6_not_found: AtomicI64,
    pub tcp6_failure: AtomicI64,
    pub tcp6_connections_handled: AtomicI64,
    pub tcp6_api_handled: AtomicI64,
    pub tcp6_announces_handled: AtomicI64,
    pub tcp6_scrapes_handled: AtomicI64,
    // Per-protocol request counters (UDP over IPv4/IPv6).
    pub udp4_bad_request: AtomicI64,
    pub udp4_invalid_request: AtomicI64,
    pub udp4_connections_handled: AtomicI64,
    pub udp4_announces_handled: AtomicI64,
    pub udp4_scrapes_handled: AtomicI64,
    pub udp6_bad_request: AtomicI64,
    pub udp6_invalid_request: AtomicI64,
    pub udp6_connections_handled: AtomicI64,
    pub udp6_announces_handled: AtomicI64,
    pub udp6_scrapes_handled: AtomicI64,
    // Current length of the UDP work queue.
    pub udp_queue_len: AtomicI64,
}
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/stats/enums/stats_event.rs | src/stats/enums/stats_event.rs | use serde::{Deserialize, Serialize};
/// Selector for stats operations: names which statistic a get/set/update call
/// targets. Variants correspond to fields in `StatsAtomics` (note `Key` /
/// `KeyUpdates` presumably map to the `keys` / `keys_updates` fields — the
/// naming differs slightly; confirm against the stats impl).
#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
pub enum StatsEvent {
    Torrents,
    TorrentsUpdates,
    Users,
    UsersUpdates,
    TimestampSave,
    TimestampTimeout,
    TimestampConsole,
    TimestampKeysTimeout,
    Seeds,
    Peers,
    Completed,
    WhitelistEnabled,
    Whitelist,
    WhitelistUpdates,
    BlacklistEnabled,
    Blacklist,
    BlacklistUpdates,
    Key,
    KeyUpdates,
    // Per-protocol request counters (TCP over IPv4/IPv6).
    Tcp4NotFound,
    Tcp4Failure,
    Tcp4ConnectionsHandled,
    Tcp4ApiHandled,
    Tcp4AnnouncesHandled,
    Tcp4ScrapesHandled,
    Tcp6NotFound,
    Tcp6Failure,
    Tcp6ConnectionsHandled,
    Tcp6ApiHandled,
    Tcp6AnnouncesHandled,
    Tcp6ScrapesHandled,
    // Per-protocol request counters (UDP over IPv4/IPv6).
    Udp4BadRequest,
    Udp4InvalidRequest,
    Udp4ConnectionsHandled,
    Udp4AnnouncesHandled,
    Udp4ScrapesHandled,
    Udp6BadRequest,
    Udp6InvalidRequest,
    Udp6ConnectionsHandled,
    Udp6AnnouncesHandled,
    Udp6ScrapesHandled,
    UdpQueueLen
}
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api_keys.rs | src/api/api_keys.rs | use std::collections::HashMap;
use std::sync::Arc;
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web::http::header::ContentType;
use actix_web::web::Data;
use serde_json::json;
use crate::api::api::{api_parse_body, api_service_token, api_validation};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::api::structs::query_token::QueryToken;
use crate::common::common::hex2bin;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
/// Look up a single key hash (40 hex chars) and return its timeout, or an
/// error/404 JSON status if the hash is malformed or unknown.
#[tracing::instrument(level = "debug")]
pub async fn api_service_key_get(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let hex_key = path.into_inner();
    if hex_key.len() != 40 {
        return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad key_hash"}));
    }
    let key_hash = match hex2bin(hex_key) {
        Ok(bytes) => InfoHash(bytes),
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid key_hash"})),
    };
    if let Some((_, timeout)) = data.torrent_tracker.get_key(key_hash) {
        HttpResponse::Ok().content_type(ContentType::json()).json(json!({
            "status": "ok",
            "timeout": timeout
        }))
    } else {
        HttpResponse::NotFound().content_type(ContentType::json()).json(json!({"status": "unknown key_hash"}))
    }
}
/// Bulk key lookup: accepts a JSON array of 40-char hex key hashes in the
/// body and returns a map of key -> timeout (0 for undecodable or unknown).
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_keys_get(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let body = match api_parse_body(payload).await {
        Ok(bytes) => bytes,
        Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
    };
    let keys: Vec<String> = match serde_json::from_slice(&body) {
        Ok(list) => list,
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
    };
    let mut keys_output = HashMap::with_capacity(keys.len());
    for key in keys {
        // Entries that are not exactly 40 chars are silently skipped, matching
        // the single-key endpoint's length rule.
        if key.len() != 40 { continue; }
        let timeout = match hex2bin(key.clone()) {
            Ok(hash) => match data.torrent_tracker.get_key(InfoHash(hash)) {
                Some((_, timeout)) => timeout as u64,
                None => 0u64,
            },
            // Undecodable hex is reported as timeout 0, same as "unknown".
            Err(_) => 0u64,
        };
        keys_output.insert(key, timeout);
    }
    HttpResponse::Ok().content_type(ContentType::json()).json(json!({
        "status": "ok",
        "keys": keys_output
    }))
}
/// Register (or refresh) a single key hash with the given timeout.
#[tracing::instrument(level = "debug")]
pub async fn api_service_key_post(request: HttpRequest, path: web::Path<(String, u64)>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let (hex_key, timeout) = path.into_inner();
    if hex_key.len() != 40 {
        return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad key_hash"}));
    }
    let key_hash = match hex2bin(hex_key) {
        Ok(bytes) => InfoHash(bytes),
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid key_hash"})),
    };
    // In persistent mode, also queue the change for the database writer.
    if data.torrent_tracker.config.database.persistent {
        let _ = data.torrent_tracker.add_key_update(key_hash, timeout as i64, UpdatesAction::Add);
    }
    if data.torrent_tracker.add_key(key_hash, timeout as i64) {
        HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"}))
    } else {
        HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "key_hash updated"}))
    }
}
/// Bulk key registration: accepts a JSON object of key hash -> timeout and
/// returns a per-key status map.
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_keys_post(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let body = match api_parse_body(payload).await {
        Ok(bytes) => bytes,
        Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
    };
    let keys: HashMap<String, u64> = match serde_json::from_slice(&body) {
        Ok(map) => map,
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
    };
    let mut keys_output = HashMap::with_capacity(keys.len());
    for (key, timeout) in keys {
        // Entries that are not exactly 40 chars are silently skipped.
        if key.len() != 40 { continue; }
        let status = match hex2bin(key.clone()) {
            Ok(hash) => {
                let key_hash = InfoHash(hash);
                // In persistent mode, also queue the change for the database writer.
                if data.torrent_tracker.config.database.persistent {
                    let _ = data.torrent_tracker.add_key_update(key_hash, timeout as i64, UpdatesAction::Add);
                }
                if data.torrent_tracker.add_key(key_hash, timeout as i64) {
                    json!({"status": "ok"})
                } else {
                    json!({"status": "key_hash updated"})
                }
            }
            Err(_) => json!({"status": "invalid key_hash"}),
        };
        keys_output.insert(key, status);
    }
    HttpResponse::Ok().content_type(ContentType::json()).json(json!({
        "status": "ok",
        "keys": keys_output
    }))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_key_delete(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let key = path.into_inner();
if key.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad key_hash"}));
}
let key_hash = match hex2bin(key) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid key"})),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_key_update(key_hash, 0i64, UpdatesAction::Remove);
}
match data.torrent_tracker.remove_key(key_hash) {
true => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
false => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "unknown key_hash"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_keys_delete(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let keys = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut keys_output = HashMap::with_capacity(keys.len());
for key_item in keys {
if key_item.len() == 40 {
match hex2bin(key_item.clone()) {
Ok(hash) => {
let key_hash = InfoHash(hash);
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_key_update(key_hash, 0i64, UpdatesAction::Remove);
}
let status = match data.torrent_tracker.remove_key(key_hash) {
true => json!({"status": "ok"}),
false => json!({"status": "unknown key_hash"}),
};
keys_output.insert(key_item, status);
}
Err(_) => {
keys_output.insert(key_item, json!({"status": "invalid key"}));
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"keys": keys_output
}))
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api_stats.rs | src/api/api_stats.rs | use std::sync::Arc;
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web::http::header::ContentType;
use actix_web::web::Data;
use crate::api::api::{api_service_token, api_validation};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::api::structs::query_token::QueryToken;
/// Return the current tracker statistics snapshot as JSON.
#[tracing::instrument(level = "debug")]
pub async fn api_service_stats_get(request: HttpRequest, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let snapshot = data.torrent_tracker.get_stats();
    HttpResponse::Ok().content_type(ContentType::json()).json(snapshot)
}
/// Render the tracker statistics in Prometheus text exposition format.
///
/// Lines are emitted in fixed order; a `true` header flag on the first line of
/// each group makes `api_service_prom_generate_line` prepend the
/// `# HELP`/`# TYPE` preamble for that metric family.
#[tracing::instrument(level = "debug")]
pub async fn api_service_prom_get(request: HttpRequest, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
    let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
    let stats = data.torrent_tracker.get_stats();
    let prometheus_id = &data.torrent_tracker.config.tracker_config.prometheus_id;
    let mut string_output = String::with_capacity(4096);
    // Gauges: the first line also emits the `{id}_gauge` family header.
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "torrents", stats.torrents, true, Some(&format!("{prometheus_id} gauge metrics"))));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "torrents_updates", stats.torrents_updates, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "users", stats.users, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "users_updates", stats.users_updates, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "seeds", stats.seeds, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "peers", stats.peers, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "completed", stats.completed, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "whitelist", stats.whitelist, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "whitelist_updates", stats.whitelist_updates, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "blacklist", stats.blacklist, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "blacklist_updates", stats.blacklist_updates, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "keys", stats.keys, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "gauge", "keys_updates", stats.keys_updates, false, None));
    // TCP counters. NOTE(review): each group re-emits the same `{id}_counter`
    // HELP/TYPE header, so the header appears four times in the output; strict
    // Prometheus parsers may object — confirm whether this is intentional.
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp4_not_found", stats.tcp4_not_found, true, Some(&format!("{prometheus_id} counter metrics"))));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp4_failure", stats.tcp4_failure, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp4_connections_handled", stats.tcp4_connections_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp4_api_handled", stats.tcp4_api_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp4_announces_handled", stats.tcp4_announces_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp4_scrapes_handled", stats.tcp4_scrapes_handled, false, None))
;
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp6_not_found", stats.tcp6_not_found, true, Some(&format!("{prometheus_id} counter metrics"))));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp6_failure", stats.tcp6_failure, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp6_connections_handled", stats.tcp6_connections_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp6_api_handled", stats.tcp6_api_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp6_announces_handled", stats.tcp6_announces_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "tcp6_scrapes_handled", stats.tcp6_scrapes_handled, false, None));
    // UDP counters.
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp4_bad_request", stats.udp4_bad_request, true, Some(&format!("{prometheus_id} counter metrics"))));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp4_invalid_request", stats.udp4_invalid_request, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp4_connections_handled", stats.udp4_connections_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp4_announces_handled", stats.udp4_announces_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp4_scrapes_handled", stats.udp4_scrapes_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp6_bad_request", stats.udp6_bad_request, true, Some(&format!("{prometheus_id} counter metrics"))));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp6_invalid_request", stats.udp6_invalid_request, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp6_connections_handled", stats.udp6_connections_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp6_announces_handled", stats.udp6_announces_handled, false, None));
    string_output.push_str(&api_service_prom_generate_line(prometheus_id, "counter", "udp6_scrapes_handled", stats.udp6_scrapes_handled, false, None));
    HttpResponse::Ok().content_type(ContentType::plaintext()).body(string_output)
}
/// Render one Prometheus exposition line for `metric` in the `{id}_{type_metric}`
/// family, e.g. `app_gauge{metric="torrents"} 5\n`.
///
/// When `with_header` is true the sample is preceded by the `# HELP` / `# TYPE`
/// preamble for the family; `description` fills the HELP text (empty string if
/// `None`). The parameter was previously named `without_header`, which inverted
/// its actual meaning — `true` always emitted the header. Renamed only; Rust
/// call sites are positional, so callers are unaffected and behavior is unchanged.
pub fn api_service_prom_generate_line(id: &str, type_metric: &str, metric: &str, value: i64, with_header: bool, description: Option<&str>) -> String
{
    if with_header {
        // First metric of a family: HELP/TYPE preamble, then the sample line.
        format!(
            "# HELP {}_{} {}\n# TYPE {}_{} {}\n{}_{}{{metric=\"{}\"}} {}\n",
            id, type_metric, description.unwrap_or(""),
            id, type_metric, type_metric,
            id, type_metric, metric, value
        )
    } else {
        format!("{id}_{type_metric}{{metric=\"{metric}\"}} {value}\n")
    }
}
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api_blacklists.rs | src/api/api_blacklists.rs | use std::collections::HashMap;
use std::sync::Arc;
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web::http::header::ContentType;
use actix_web::web::Data;
use serde_json::json;
use crate::api::api::{api_parse_body, api_service_token, api_validation};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::api::structs::query_token::QueryToken;
use crate::common::common::hex2bin;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
/// Check whether a single info hash (40 hex chars) is blacklisted.
#[tracing::instrument(level = "debug")]
pub async fn api_service_blacklist_get(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let hex_info = path.into_inner();
    if hex_info.len() != 40 {
        return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
    }
    let info_hash = match hex2bin(hex_info) {
        Ok(bytes) => InfoHash(bytes),
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
    };
    if data.torrent_tracker.check_blacklist(info_hash) {
        HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"}))
    } else {
        HttpResponse::NotFound().content_type(ContentType::json()).json(json!({"status": "unknown info_hash"}))
    }
}
/// Bulk blacklist check: accepts a JSON array of info hashes and returns a
/// map of hash -> blacklisted flag (false for undecodable hex).
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_blacklists_get(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let body = match api_parse_body(payload).await {
        Ok(bytes) => bytes,
        Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
    };
    let blacklists: Vec<String> = match serde_json::from_slice(&body) {
        Ok(list) => list,
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
    };
    let mut blacklist_output = HashMap::with_capacity(blacklists.len());
    for blacklist in blacklists {
        // Entries that are not exactly 40 chars are silently skipped.
        if blacklist.len() != 40 { continue; }
        let listed = match hex2bin(blacklist.clone()) {
            Ok(hash) => data.torrent_tracker.check_blacklist(InfoHash(hash)),
            // Undecodable hex is reported as not blacklisted.
            Err(_) => false,
        };
        blacklist_output.insert(blacklist, listed);
    }
    HttpResponse::Ok().content_type(ContentType::json()).json(json!({
        "status": "ok",
        "blacklists": blacklist_output
    }))
}
/// Add a single info hash to the blacklist.
#[tracing::instrument(level = "debug")]
pub async fn api_service_blacklist_post(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let hex_info = path.into_inner();
    if hex_info.len() != 40 {
        return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
    }
    let info_hash = match hex2bin(hex_info) {
        Ok(bytes) => InfoHash(bytes),
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
    };
    // In persistent mode, also queue the change for the database writer.
    if data.torrent_tracker.config.database.persistent {
        let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Add);
    }
    if data.torrent_tracker.add_blacklist(info_hash) {
        HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"}))
    } else {
        HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "info_hash updated"}))
    }
}
/// Bulk blacklist add: accepts a JSON array of info hashes and returns a
/// per-hash status map.
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_blacklists_post(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
    // Authentication guards: client validation first, then the API token.
    if let Some(denied) = api_validation(&request, &data).await { return denied; }
    let query = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
    if let Some(rejected) = api_service_token(query.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return rejected; }
    let body = match api_parse_body(payload).await {
        Ok(bytes) => bytes,
        Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
    };
    let blacklists: Vec<String> = match serde_json::from_slice(&body) {
        Ok(list) => list,
        Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
    };
    let mut blacklists_output = HashMap::with_capacity(blacklists.len());
    for info in blacklists {
        // Entries that are not exactly 40 chars are silently skipped.
        if info.len() != 40 { continue; }
        let status = match hex2bin(info.clone()) {
            Ok(hash) => {
                let info_hash = InfoHash(hash);
                // In persistent mode, also queue the change for the database writer.
                if data.torrent_tracker.config.database.persistent {
                    let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Add);
                }
                if data.torrent_tracker.add_blacklist(info_hash) {
                    json!({"status": "ok"})
                } else {
                    json!({"status": "info_hash updated"})
                }
            }
            Err(_) => json!({"status": "invalid info_hash"}),
        };
        blacklists_output.insert(info, status);
    }
    HttpResponse::Ok().content_type(ContentType::json()).json(json!({
        "status": "ok",
        "blacklists": blacklists_output
    }))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_blacklist_delete(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let info = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Remove);
}
match data.torrent_tracker.remove_blacklist(info_hash) {
true => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
false => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "unknown info_hash"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_blacklists_delete(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let blacklists = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut blacklists_output = HashMap::with_capacity(blacklists.len());
for info in blacklists {
if info.len() == 40 {
match hex2bin(info.clone()) {
Ok(hash) => {
let info_hash = InfoHash(hash);
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Remove);
}
let status = match data.torrent_tracker.remove_blacklist(info_hash) {
true => json!({"status": "ok"}),
false => json!({"status": "unknown info_hash"}),
};
blacklists_output.insert(info, status);
}
Err(_) => {
blacklists_output.insert(info, json!({"status": "invalid info_hash"}));
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"blacklists": blacklists_output
}))
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/structs.rs | src/api/structs.rs | pub mod query_token;
pub mod api_service_data; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api.rs | src/api/api.rs | use std::fs::File;
use std::future::Future;
use std::io::BufReader;
use std::net::{IpAddr, SocketAddr};
use std::process::exit;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use actix_cors::Cors;
use actix_web::{App, http, HttpRequest, HttpResponse, HttpServer, web};
use actix_web::dev::ServerHandle;
use actix_web::http::header::ContentType;
use actix_web::web::{BytesMut, Data, ServiceConfig};
use futures_util::StreamExt;
use log::{error, info};
use serde_json::json;
use utoipa_swagger_ui::{Config, SwaggerUi};
use crate::api::api_blacklists::{api_service_blacklist_delete, api_service_blacklist_get, api_service_blacklist_post, api_service_blacklists_delete, api_service_blacklists_get, api_service_blacklists_post};
use crate::api::api_keys::{api_service_key_delete, api_service_key_get, api_service_key_post, api_service_keys_delete, api_service_keys_get, api_service_keys_post};
use crate::api::api_stats::{api_service_prom_get, api_service_stats_get};
use crate::api::api_torrents::{api_service_torrent_delete, api_service_torrent_get, api_service_torrent_post, api_service_torrents_delete, api_service_torrents_get, api_service_torrents_post};
use crate::api::api_users::{api_service_user_delete, api_service_user_get, api_service_user_post, api_service_users_delete, api_service_users_get, api_service_users_post};
use crate::api::api_whitelists::{api_service_whitelist_delete, api_service_whitelist_get, api_service_whitelist_post, api_service_whitelists_delete, api_service_whitelists_get, api_service_whitelists_post};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::common::structs::custom_error::CustomError;
use crate::config::structs::api_trackers_config::ApiTrackersConfig;
use crate::config::structs::configuration::Configuration;
use crate::stats::enums::stats_event::StatsEvent;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
#[tracing::instrument(level = "debug")]
pub fn api_service_cors() -> Cors
{
Cors::default()
.send_wildcard()
.allowed_methods(vec!["GET", "POST", "PUT", "DELETE"])
.allowed_headers(vec![http::header::X_FORWARDED_FOR, http::header::ACCEPT])
.allowed_header(http::header::CONTENT_TYPE)
.max_age(1)
}
#[tracing::instrument(level = "debug")]
pub fn api_service_routes(data: Arc<ApiServiceData>) -> Box<dyn Fn(&mut ServiceConfig) + Send + Sync>
{
Box::new(move |cfg: &mut ServiceConfig| {
cfg.app_data(Data::new(Arc::clone(&data)));
cfg.default_service(web::route().to(api_service_not_found));
cfg.service(web::resource("stats").route(web::get().to(api_service_stats_get)));
cfg.service(web::resource("metrics").route(web::get().to(api_service_prom_get)));
cfg.service(web::resource("api/torrent/{info_hash}")
.route(web::get().to(api_service_torrent_get))
.route(web::delete().to(api_service_torrent_delete))
);
cfg.service(web::resource("api/torrent/{info_hash}/{completed}").route(web::post().to(api_service_torrent_post)));
cfg.service(web::resource("api/torrents")
.route(web::get().to(api_service_torrents_get))
.route(web::post().to(api_service_torrents_post))
.route(web::delete().to(api_service_torrents_delete))
);
cfg.service(web::resource("api/whitelist/{info_hash}")
.route(web::get().to(api_service_whitelist_get))
.route(web::post().to(api_service_whitelist_post))
.route(web::delete().to(api_service_whitelist_delete))
);
cfg.service(web::resource("api/whitelists")
.route(web::get().to(api_service_whitelists_get))
.route(web::post().to(api_service_whitelists_post))
.route(web::delete().to(api_service_whitelists_delete))
);
cfg.service(web::resource("api/blacklist/{info_hash}")
.route(web::get().to(api_service_blacklist_get))
.route(web::post().to(api_service_blacklist_post))
.route(web::delete().to(api_service_blacklist_delete))
);
cfg.service(web::resource("api/blacklists")
.route(web::get().to(api_service_blacklists_get))
.route(web::post().to(api_service_blacklists_post))
.route(web::delete().to(api_service_blacklists_delete))
);
cfg.service(web::resource("api/key/{key_hash}")
.route(web::get().to(api_service_key_get))
.route(web::delete().to(api_service_key_delete))
);
cfg.service(web::resource("api/key/{key_hash}/{timeout}")
.route(web::post().to(api_service_key_post))
);
cfg.service(web::resource("api/keys")
.route(web::get().to(api_service_keys_get))
.route(web::post().to(api_service_keys_post))
.route(web::delete().to(api_service_keys_delete))
);
cfg.service(web::resource("api/user/{id}")
.route(web::get().to(api_service_user_get))
.route(web::delete().to(api_service_user_delete))
);
cfg.service(web::resource("api/user/{id}/{key}/{uploaded}/{downloaded}/{completed}/{updated}/{active}")
.route(web::post().to(api_service_user_post))
);
cfg.service(web::resource("api/users")
.route(web::get().to(api_service_users_get))
.route(web::post().to(api_service_users_post))
.route(web::delete().to(api_service_users_delete))
);
if data.torrent_tracker.config.tracker_config.swagger {
cfg.service(SwaggerUi::new("/swagger-ui/{_:.*}").config(Config::new(["/api/openapi.json"])));
cfg.service(web::resource("/api/openapi.json")
.route(web::get().to(api_service_openapi_json))
);
}
})
}
#[tracing::instrument(level = "debug")]
pub async fn api_service(
addr: SocketAddr,
data: Arc<TorrentTracker>,
api_server_object: ApiTrackersConfig
) -> (ServerHandle, impl Future<Output=Result<(), std::io::Error>>)
{
let keep_alive = api_server_object.keep_alive;
let request_timeout = api_server_object.request_timeout;
let disconnect_timeout = api_server_object.disconnect_timeout;
let worker_threads = api_server_object.threads as usize;
let api_service_data = Arc::new(ApiServiceData {
torrent_tracker: Arc::clone(&data),
api_trackers_config: Arc::new(api_server_object.clone()),
});
let app_factory = move || {
let cors = api_service_cors();
let sentry_wrap = sentry_actix::Sentry::new();
App::new()
.wrap(cors)
.wrap(sentry_wrap)
.configure(api_service_routes(Arc::clone(&api_service_data)))
};
if api_server_object.ssl {
info!("[APIS] Starting server listener with SSL on {addr}");
if api_server_object.ssl_key.is_empty() || api_server_object.ssl_cert.is_empty() {
error!("[APIS] No SSL key or SSL certificate given, exiting...");
exit(1);
}
let key_file = &mut BufReader::new(File::open(api_server_object.ssl_key.clone()).unwrap_or_else(|data| {
sentry::capture_error(&data);
panic!("[APIS] SSL key unreadable: {data}");
}));
let certs_file = &mut BufReader::new(File::open(api_server_object.ssl_cert.clone()).unwrap_or_else(|data| {
sentry::capture_error(&data);
panic!("[APIS] SSL cert unreadable: {data}");
}));
let tls_certs = rustls_pemfile::certs(certs_file).collect::<Result<Vec<_>, _>>().unwrap_or_else(|data| {
sentry::capture_error(&data);
panic!("[APIS] SSL cert couldn't be extracted: {data}");
});
let tls_key = rustls_pemfile::pkcs8_private_keys(key_file).next().unwrap().unwrap_or_else(|data| {
sentry::capture_error(&data);
panic!("[APIS] SSL key couldn't be extracted: {data}");
});
let tls_config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(tls_certs, rustls::pki_types::PrivateKeyDer::Pkcs8(tls_key))
.unwrap_or_else(|data| {
sentry::capture_error(&data);
panic!("[APIS] SSL config couldn't be created: {data}");
});
let server = HttpServer::new(app_factory)
.keep_alive(Duration::from_secs(keep_alive))
.client_request_timeout(Duration::from_secs(request_timeout))
.client_disconnect_timeout(Duration::from_secs(disconnect_timeout))
.workers(worker_threads)
.bind_rustls_0_23((addr.ip(), addr.port()), tls_config)
.unwrap()
.disable_signals()
.run();
return (server.handle(), server);
}
info!("[API] Starting server listener on {addr}");
let server = HttpServer::new(app_factory)
.keep_alive(Duration::from_secs(keep_alive))
.client_request_timeout(Duration::from_secs(request_timeout))
.client_disconnect_timeout(Duration::from_secs(disconnect_timeout))
.workers(worker_threads)
.bind((addr.ip(), addr.port()))
.unwrap()
.disable_signals()
.run();
(server.handle(), server)
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_stats_log(ip: IpAddr, tracker: Arc<TorrentTracker>)
{
let event = if ip.is_ipv4() {
StatsEvent::Tcp4ConnectionsHandled
} else {
StatsEvent::Tcp6ConnectionsHandled
};
tracker.update_stats(event, 1);
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_token(token: Option<String>, config: Arc<Configuration>) -> Option<HttpResponse>
{
let token_code = match token {
Some(token) => token,
None => {
return Some(HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({
"status": "missing token"
})));
}
};
if token_code != config.tracker_config.api_key {
return Some(HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({
"status": "invalid token"
})));
}
None
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_retrieve_remote_ip(request: &HttpRequest, data: Arc<ApiTrackersConfig>) -> Result<IpAddr, ()>
{
let origin_ip = request.peer_addr().map(|addr| addr.ip()).ok_or(())?;
request.headers()
.get(&data.real_ip)
.and_then(|header| header.to_str().ok())
.and_then(|ip_str| IpAddr::from_str(ip_str).ok())
.map(Ok)
.unwrap_or(Ok(origin_ip))
}
#[tracing::instrument(level = "debug")]
pub async fn api_validate_ip(request: &HttpRequest, data: Data<Arc<ApiServiceData>>) -> Result<IpAddr, HttpResponse>
{
match api_service_retrieve_remote_ip(request, Arc::clone(&data.api_trackers_config)).await {
Ok(ip) => {
api_service_stats_log(ip, Arc::clone(&data.torrent_tracker)).await;
Ok(ip)
}
Err(_) => {
Err(HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "invalid ip"
})))
}
}
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_not_found(request: HttpRequest, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await {
return error_return;
}
HttpResponse::NotFound().content_type(ContentType::json()).json(json!({
"status": "not found"
}))
}
#[tracing::instrument(level = "debug")]
pub fn api_stat_update(ip: IpAddr, data: Arc<TorrentTracker>, stats_ipv4: StatsEvent, stat_ipv6: StatsEvent, count: i64)
{
let event = if ip.is_ipv4() {
stats_ipv4
} else {
stat_ipv6
};
data.update_stats(event, count);
}
#[tracing::instrument(level = "debug")]
pub async fn api_validation(request: &HttpRequest, data: &Data<Arc<ApiServiceData>>) -> Option<HttpResponse>
{
match api_validate_ip(request, data.clone()).await {
Ok(ip) => {
api_stat_update(
ip,
Arc::clone(&data.torrent_tracker),
StatsEvent::Tcp4ApiHandled,
StatsEvent::Tcp6ApiHandled,
1
);
None
}
Err(result) => Some(result),
}
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_openapi_json() -> HttpResponse
{
let openapi_file = include_str!("../openapi.json");
HttpResponse::Ok().content_type(ContentType::json()).body(openapi_file)
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_parse_body(mut payload: web::Payload) -> Result<BytesMut, CustomError>
{
let mut body = BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.map_err(|_| CustomError::new("chunk error"))?;
if body.len() + chunk.len() > 1_048_576 {
return Err(CustomError::new("chunk size exceeded"));
}
body.extend_from_slice(&chunk);
}
Ok(body)
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/mod.rs | src/api/mod.rs | pub mod structs;
#[allow(clippy::module_inception)]
pub mod api;
pub mod api_blacklists;
pub mod api_keys;
pub mod api_torrents;
pub mod api_users;
pub mod api_whitelists;
pub mod api_stats; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api_torrents.rs | src/api/api_torrents.rs | use crate::tracker::structs::peer_id::PeerId;
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web::http::header::ContentType;
use actix_web::web::Data;
use serde_json::{json, Value};
use crate::api::api::{api_parse_body, api_service_token, api_validation};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::api::structs::query_token::QueryToken;
use crate::common::common::hex2bin;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
use crate::tracker::structs::torrent_entry::TorrentEntry;
#[tracing::instrument(level = "debug")]
pub async fn api_service_torrent_get(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let info = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
match data.torrent_tracker.get_torrent(info_hash) {
None => HttpResponse::NotFound().content_type(ContentType::json()).json(json!({"status": "unknown info_hash"})),
Some(torrent) => HttpResponse::Ok().content_type(ContentType::json()).json(api_service_torrents_return_torrent_json(torrent)),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_torrents_get(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let info_hashes = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(hash) => hash,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut torrents_output = HashMap::with_capacity(info_hashes.len());
for info in info_hashes {
if info.len() == 40 {
match hex2bin(info.clone()) {
Ok(hash) => {
let info_hash = InfoHash(hash);
if let Some(torrent) = data.torrent_tracker.get_torrent(info_hash) {
torrents_output.insert(info, api_service_torrents_return_torrent_json(torrent));
}
}
Err(_) => {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"}))
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"torrents": torrents_output
}))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_torrent_post(request: HttpRequest, path: web::Path<(String, u64)>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let (info, completed) = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
let torrent_entry = TorrentEntry {
seeds: BTreeMap::new(),
peers: BTreeMap::new(),
completed,
updated: std::time::Instant::now(),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_torrent_update(info_hash, torrent_entry.clone(), UpdatesAction::Add);
}
match data.torrent_tracker.add_torrent(info_hash, torrent_entry) {
(_, true) => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
(_, false) => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "info_hash updated"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_torrents_post(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let info_hashmap = match serde_json::from_slice::<HashMap<String, u64>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut torrents_output = HashMap::with_capacity(info_hashmap.len());
for (info, completed) in info_hashmap {
if info.len() == 40 {
match hex2bin(info.clone()) {
Ok(hash) => {
let info_hash = InfoHash(hash);
let torrent_entry = TorrentEntry {
seeds: BTreeMap::new(),
peers: BTreeMap::new(),
completed,
updated: std::time::Instant::now(),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_torrent_update(info_hash, torrent_entry.clone(), UpdatesAction::Add);
}
let status = match data.torrent_tracker.add_torrent(info_hash, torrent_entry) {
(_, true) => json!({"status": "ok"}),
(_, false) => json!({"status": "info_hash updated"}),
};
torrents_output.insert(info, status);
}
Err(_) => {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"}))
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"torrents": torrents_output
}))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_torrent_delete(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let info = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_torrent_update(info_hash, TorrentEntry::default(), UpdatesAction::Remove);
}
match data.torrent_tracker.remove_torrent(info_hash) {
None => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "unknown info_hash"})),
Some(_) => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_torrents_delete(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let hashes = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut torrents_output = HashMap::with_capacity(hashes.len());
for info in hashes {
if info.len() == 40 {
match hex2bin(info.clone()) {
Ok(hash) => {
let info_hash = InfoHash(hash);
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_torrent_update(info_hash, TorrentEntry::default(), UpdatesAction::Remove);
}
let status = match data.torrent_tracker.remove_torrent(info_hash) {
None => json!({"status": "unknown info_hash"}),
Some(_) => json!({"status": "ok"}),
};
torrents_output.insert(info, status);
}
Err(_) => {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"}))
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"torrents": torrents_output
}))
}
#[tracing::instrument(level = "debug")]
pub fn api_service_torrents_return_torrent_json(torrent: TorrentEntry) -> Value
{
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
let process_seed = |(peer_id, torrent_peer): (&PeerId, &crate::tracker::structs::torrent_peer::TorrentPeer)| {
let elapsed_ms = torrent_peer.updated.elapsed().as_millis();
let timestamp = now.saturating_sub(elapsed_ms);
let timestamp_final = ((timestamp as f64 / 2.0).round() * 2.0) as u64;
json!({
"peer_id": peer_id.0,
"peer_addr": torrent_peer.peer_addr,
"updated": timestamp_final,
"uploaded": torrent_peer.uploaded.0 as u64,
"downloaded": torrent_peer.downloaded.0 as u64,
"left": torrent_peer.left.0 as u64,
})
};
let process_peer = |(peer_id, torrent_peer): (&PeerId, &crate::tracker::structs::torrent_peer::TorrentPeer)| {
let elapsed_ms = torrent_peer.updated.elapsed().as_millis();
let timestamp = now.saturating_sub(elapsed_ms);
let timestamp_final = ((timestamp as f64 / 2.0).round() * 2.0) as u64;
json!({
"peer_id": peer_id.0,
"peer_addr": torrent_peer.peer_addr,
"updated": timestamp_final,
"uploaded": torrent_peer.uploaded.0 as u64,
"downloaded": torrent_peer.downloaded.0 as u64,
"left": torrent_peer.left.0 as u64,
})
};
let seeds: Vec<Value> = torrent.seeds.iter().map(process_seed).collect();
let peers: Vec<Value> = torrent.peers.iter().map(process_peer).collect();
let elapsed_ms = torrent.updated.elapsed().as_millis();
let timestamp = now.saturating_sub(elapsed_ms);
let timestamp_final = ((timestamp as f64 / 2.0).round() * 2.0) as u64;
json!({
"status": "ok",
"seeds": seeds,
"peers": peers,
"completed": torrent.completed,
"updated": timestamp_final
})
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api_whitelists.rs | src/api/api_whitelists.rs | use std::collections::HashMap;
use std::sync::Arc;
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web::http::header::ContentType;
use actix_web::web::Data;
use serde_json::json;
use crate::api::api::{api_parse_body, api_service_token, api_validation};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::api::structs::query_token::QueryToken;
use crate::common::common::hex2bin;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::info_hash::InfoHash;
#[tracing::instrument(level = "debug")]
pub async fn api_service_whitelist_get(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let info = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
match data.torrent_tracker.check_whitelist(info_hash) {
true => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
false => HttpResponse::NotFound().content_type(ContentType::json()).json(json!({"status": "unknown info_hash"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_whitelists_get(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let whitelists = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut whitelist_output = HashMap::with_capacity(whitelists.len());
for whitelist in whitelists {
if whitelist.len() == 40 {
match hex2bin(whitelist.clone()) {
Ok(hash) => {
let whitelist_hash = InfoHash(hash);
whitelist_output.insert(whitelist, data.torrent_tracker.check_whitelist(whitelist_hash));
}
Err(_) => {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"}))
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"whitelists": whitelist_output
}))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_whitelist_post(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let info = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_whitelist_update(info_hash, UpdatesAction::Add);
}
match data.torrent_tracker.add_whitelist(info_hash) {
true => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
false => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "info_hash updated"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_whitelists_post(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let whitelists = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut whitelists_output = HashMap::with_capacity(whitelists.len());
for info in whitelists {
if info.len() == 40 {
match hex2bin(info.clone()) {
Ok(hash) => {
let info_hash = InfoHash(hash);
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_whitelist_update(info_hash, UpdatesAction::Add);
}
let status = match data.torrent_tracker.add_whitelist(info_hash) {
true => json!({"status": "ok"}),
false => json!({"status": "info_hash updated"}),
};
whitelists_output.insert(info, status);
}
Err(_) => {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"}))
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"whitelists": whitelists_output
}))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_whitelist_delete(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let info = path.into_inner();
if info.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}));
}
let info_hash = match hex2bin(info) {
Ok(hash) => InfoHash(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"})),
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_whitelist_update(info_hash, UpdatesAction::Remove);
}
match data.torrent_tracker.remove_whitelist(info_hash) {
true => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
false => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "unknown info_hash"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_whitelists_delete(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let whitelists = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut whitelists_output = HashMap::with_capacity(whitelists.len());
for info in whitelists {
if info.len() == 40 {
match hex2bin(info.clone()) {
Ok(hash) => {
let info_hash = InfoHash(hash);
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_whitelist_update(info_hash, UpdatesAction::Remove);
}
let status = match data.torrent_tracker.remove_whitelist(info_hash) {
true => json!({"status": "ok"}),
false => json!({"status": "unknown info_hash"}),
};
whitelists_output.insert(info, status);
}
Err(_) => {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid info_hash"}))
}
}
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"whitelists": whitelists_output
}))
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/api_users.rs | src/api/api_users.rs | use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web::http::header::ContentType;
use actix_web::http::StatusCode;
use actix_web::web::Data;
use regex::Regex;
use serde_json::{json, Value};
use sha1::{Digest, Sha1};
use crate::api::api::{api_parse_body, api_service_token, api_validation};
use crate::api::structs::api_service_data::ApiServiceData;
use crate::api::structs::query_token::QueryToken;
use crate::common::common::hex2bin;
use crate::tracker::enums::updates_action::UpdatesAction;
use crate::tracker::structs::user_entry_item::UserEntryItem;
use crate::tracker::structs::user_id::UserId;
lazy_static::lazy_static! {
static ref UUID_REGEX: Regex = Regex::new(r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$").unwrap();
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_user_get(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let id = path.into_inner();
let (status_code, data) = api_service_users_return_json(id, data);
match status_code {
StatusCode::OK => HttpResponse::Ok().content_type(ContentType::json()).json(data),
_ => HttpResponse::NotFound().content_type(ContentType::json()).json(data),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_users_get(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let ids = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(id) => id,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut users_output = HashMap::with_capacity(ids.len());
for id in ids {
if id.len() == 40 {
let (_, user_data) = api_service_users_return_json(id.clone(), Data::clone(&data));
users_output.insert(id, user_data);
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"users": users_output
}))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_user_post(request: HttpRequest, path: web::Path<(String, String, u64, u64, u64, u64, u8)>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let (id, key, uploaded, downloaded, completed, updated, active) = path.into_inner();
if key.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad key_hash"}));
}
let key_hash = match hex2bin(key) {
Ok(hash) => UserId(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid key_hash"})),
};
let mut user_entry = UserEntryItem {
key: key_hash,
user_id: None,
user_uuid: None,
uploaded,
downloaded,
completed,
updated,
active,
torrents_active: BTreeMap::new(),
};
let id_hash = if data.torrent_tracker.config.database_structure.users.id_uuid {
if !UUID_REGEX.is_match(&id) {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid uuid"}));
}
user_entry.user_uuid = Some(id.to_lowercase());
hash_id(&id)
} else {
match id.parse::<u64>() {
Ok(user_id) => {
user_entry.user_id = Some(user_id);
hash_id(&id)
}
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid id"})),
}
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_user_update(UserId(id_hash), user_entry.clone(), UpdatesAction::Add);
}
match data.torrent_tracker.add_user(UserId(id_hash), user_entry) {
true => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "user_hash added"})),
false => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "user_hash updated"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_users_post(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let hashes = match serde_json::from_slice::<Vec<(String, String, u64, u64, u64, u64, u8)>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut users_output = HashMap::with_capacity(hashes.len());
for (id, key, uploaded, downloaded, completed, updated, active) in hashes {
if key.len() == 40 {
let key_hash = match hex2bin(key) {
Ok(hash) => UserId(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid key_hash"})),
};
let mut user_entry = UserEntryItem {
key: key_hash,
user_id: None,
user_uuid: None,
uploaded,
downloaded,
completed,
updated,
active,
torrents_active: BTreeMap::new(),
};
let id_hash = if data.torrent_tracker.config.database_structure.users.id_uuid {
if !UUID_REGEX.is_match(&id) {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid uuid"}));
}
user_entry.user_uuid = Some(id.to_lowercase());
hash_id(&id)
} else {
match id.parse::<u64>() {
Ok(user_id) => {
user_entry.user_id = Some(user_id);
hash_id(&id)
}
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid id"})),
}
};
if data.torrent_tracker.config.database.persistent {
let _ = data.torrent_tracker.add_user_update(UserId(id_hash), user_entry.clone(), UpdatesAction::Add);
}
let status = match data.torrent_tracker.add_user(UserId(id_hash), user_entry) {
true => json!({"status": "user_hash added"}),
false => json!({"status": "user_hash updated"}),
};
users_output.insert(id, status);
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"users": users_output
}))
}
#[tracing::instrument(level = "debug")]
pub async fn api_service_user_delete(request: HttpRequest, path: web::Path<String>, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let id = path.into_inner();
if id.len() != 40 {
return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad user_hash"}));
}
let id_hash = match hex2bin(id) {
Ok(hash) => UserId(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid user_hash"})),
};
if data.torrent_tracker.config.database.persistent {
let empty_user = UserEntryItem {
key: UserId([0u8; 20]),
user_id: None,
user_uuid: None,
uploaded: 0,
downloaded: 0,
completed: 0,
updated: 0,
active: 0,
torrents_active: BTreeMap::new(),
};
let _ = data.torrent_tracker.add_user_update(id_hash, empty_user, UpdatesAction::Remove);
}
match data.torrent_tracker.remove_user(id_hash) {
None => HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": "unknown user_hash"})),
Some(_) => HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})),
}
}
#[tracing::instrument(skip(payload), level = "debug")]
pub async fn api_service_users_delete(request: HttpRequest, payload: web::Payload, data: Data<Arc<ApiServiceData>>) -> HttpResponse
{
if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
let params = web::Query::<QueryToken>::from_query(request.query_string()).unwrap();
if let Some(response) = api_service_token(params.token.clone(), Arc::clone(&data.torrent_tracker.config)).await { return response; }
let body = match api_parse_body(payload).await {
Ok(data) => data,
Err(error) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})),
};
let ids = match serde_json::from_slice::<Vec<String>>(&body) {
Ok(data) => data,
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})),
};
let mut users_output = HashMap::with_capacity(ids.len());
for id in ids {
if id.len() == 40 {
let id_hash = match hex2bin(id.clone()) {
Ok(hash) => UserId(hash),
Err(_) => return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "invalid user_hash"})),
};
if data.torrent_tracker.config.database.persistent {
let empty_user = UserEntryItem {
key: UserId([0u8; 20]),
user_id: None,
user_uuid: None,
uploaded: 0,
downloaded: 0,
completed: 0,
updated: 0,
active: 0,
torrents_active: BTreeMap::new(),
};
let _ = data.torrent_tracker.add_user_update(id_hash, empty_user, UpdatesAction::Remove);
}
let status = match data.torrent_tracker.remove_user(id_hash) {
None => json!({"status": "unknown user_hash"}),
Some(_) => json!({"status": "ok"}),
};
users_output.insert(id, status);
}
}
HttpResponse::Ok().content_type(ContentType::json()).json(json!({
"status": "ok",
"users": users_output
}))
}
#[tracing::instrument(level = "debug")]
pub fn api_service_users_return_json(id: String, data: Data<Arc<ApiServiceData>>) -> (StatusCode, Value)
{
let id_hash = hash_id(&id);
let uses_uuid = data.torrent_tracker.config.database_structure.users.id_uuid;
match data.torrent_tracker.get_user(UserId(id_hash)) {
None => (StatusCode::NOT_FOUND, json!({"status": "unknown user_hash"})),
Some(user_data) => {
let response = if uses_uuid {
json!({
"status": "ok",
"uuid": user_data.user_uuid,
"key": user_data.key,
"uploaded": user_data.uploaded,
"downloaded": user_data.downloaded,
"completed": user_data.completed,
"updated": user_data.updated,
"active": user_data.active,
"torrents_active": user_data.torrents_active
})
} else {
json!({
"status": "ok",
"id": user_data.user_id,
"key": user_data.key,
"uploaded": user_data.uploaded,
"downloaded": user_data.downloaded,
"completed": user_data.completed,
"updated": user_data.updated,
"active": user_data.active,
"torrents_active": user_data.torrents_active
})
};
(StatusCode::OK, response)
}
}
}
fn hash_id(id: &str) -> [u8; 20] {
let mut hasher = Sha1::new();
hasher.update(id.as_bytes());
<[u8; 20]>::try_from(hasher.finalize().as_slice()).unwrap()
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/structs/query_token.rs | src/api/structs/query_token.rs | use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct QueryToken {
pub(crate) token: Option<String>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/api/structs/api_service_data.rs | src/api/structs/api_service_data.rs | use std::sync::Arc;
use crate::config::structs::api_trackers_config::ApiTrackersConfig;
use crate::tracker::structs::torrent_tracker::TorrentTracker;
#[derive(Debug)]
pub struct ApiServiceData {
pub(crate) torrent_tracker: Arc<TorrentTracker>,
pub(crate) api_trackers_config: Arc<ApiTrackersConfig>
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/structs.rs | src/common/structs.rs | pub mod custom_error;
pub mod number_of_bytes;
pub mod number_of_bytes_def;
pub mod get_torrents_api;
pub mod get_torrent_api; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/mod.rs | src/common/mod.rs | pub mod structs;
#[allow(clippy::module_inception)]
pub mod common;
pub mod impls; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/impls.rs | src/common/impls.rs | pub mod custom_error; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/common.rs | src/common/common.rs | use std::collections::HashMap;
use std::fmt;
use std::fmt::Formatter;
use std::io::Cursor;
use std::time::{Duration, SystemTime};
use async_std::future;
use byteorder::{BigEndian, ReadBytesExt};
use fern::colors::{Color, ColoredLevelConfig};
use log::info;
use tokio_shutdown::Shutdown;
use crate::common::structs::custom_error::CustomError;
use crate::config::structs::configuration::Configuration;
pub fn parse_query(query: Option<String>) -> Result<HashMap<String, Vec<Vec<u8>>>, CustomError> {
let mut queries = HashMap::new();
if let Some(result) = query {
for query_item in result.split('&') {
if query_item.is_empty() {
continue;
}
if let Some(equal_pos) = query_item.find('=') {
let (key_part, value_part) = query_item.split_at(equal_pos);
let key_name_raw = key_part;
let value_data_raw = &value_part[1..];
let key_name = percent_encoding::percent_decode_str(key_name_raw)
.decode_utf8_lossy()
.to_lowercase();
if key_name.is_empty() {
continue;
}
let value_data = percent_encoding::percent_decode_str(value_data_raw).collect::<Vec<u8>>();
queries
.entry(key_name)
.or_insert_with(Vec::new)
.push(value_data);
} else {
let key_name = percent_encoding::percent_decode_str(query_item)
.decode_utf8_lossy()
.to_lowercase();
if key_name.is_empty() {
continue;
}
queries
.entry(key_name)
.or_insert_with(Vec::new)
.push(Vec::new());
}
}
}
Ok(queries)
}
pub fn udp_check_host_and_port_used(bind_address: String) {
if cfg!(target_os = "windows") {
if let Err(data) = std::net::UdpSocket::bind(&bind_address) {
sentry::capture_error(&data);
panic!("Unable to bind to {} ! Exiting...", &bind_address);
}
}
}
pub(crate) fn bin2hex(data: &[u8; 20], f: &mut Formatter) -> fmt::Result {
let mut chars = [0u8; 40];
binascii::bin2hex(data, &mut chars).expect("failed to hexlify");
write!(f, "{}", std::str::from_utf8(&chars).unwrap())
}
pub fn hex2bin(data: String) -> Result<[u8; 20], CustomError> {
hex::decode(data)
.map_err(|data| {
sentry::capture_error(&data);
CustomError::new("error converting hex to bin")
})
.and_then(|hash_result| {
hash_result
.get(..20)
.and_then(|slice| slice.try_into().ok())
.ok_or_else(|| CustomError::new("invalid hex length"))
})
}
pub fn print_type<T>(_: &T) {
println!("{:?}", std::any::type_name::<T>());
}
pub fn return_type<T>(_: &T) -> String {
format!("{:?}", std::any::type_name::<T>())
}
pub fn equal_string_check(source: &str, check: &str) -> bool {
if source == check {
return true;
}
println!("Source: {source}");
println!("Check: {check}");
false
}
pub fn setup_logging(config: &Configuration) {
let level = match config.log_level.as_str() {
"off" => log::LevelFilter::Off,
"trace" => log::LevelFilter::Trace,
"debug" => log::LevelFilter::Debug,
"info" => log::LevelFilter::Info,
"warn" => log::LevelFilter::Warn,
"error" => log::LevelFilter::Error,
_ => {
panic!("Unknown log level encountered: '{}'", config.log_level.as_str());
}
};
let colors = ColoredLevelConfig::new()
.trace(Color::Cyan)
.debug(Color::Magenta)
.info(Color::Green)
.warn(Color::Yellow)
.error(Color::Red);
fern::Dispatch::new()
.format(move |out, message, record| {
out.finish(format_args!(
"{} [{:width$}][{}] {}",
chrono::Local::now().format("%Y-%m-%d %H:%M:%S%.9f"),
colors.color(record.level()),
record.target(),
message,
width = 5
))
})
.level(level)
.chain(std::io::stdout())
.apply()
.unwrap_or_else(|_| panic!("Failed to initialize logging."));
info!("logging initialized.");
}
pub async fn current_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("System time before UNIX epoch")
.as_secs()
}
pub async fn convert_int_to_bytes(number: &u64) -> Vec<u8> {
let bytes = number.to_be_bytes();
let leading_zeros = number.leading_zeros() as usize / 8;
bytes[leading_zeros..].to_vec()
}
pub async fn convert_bytes_to_int(array: &[u8]) -> u64 {
let mut array_fixed = [0u8; 8];
let start_idx = 8 - array.len();
array_fixed[start_idx..].copy_from_slice(array);
Cursor::new(array_fixed).read_u64::<BigEndian>().unwrap()
}
pub async fn shutdown_waiting(timeout: Duration, shutdown_handler: Shutdown) -> bool {
future::timeout(timeout, shutdown_handler.handle())
.await
.is_ok()
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/impls/custom_error.rs | src/common/impls/custom_error.rs | use std::error::Error;
use std::fmt;
use std::fmt::Formatter;
use crate::common::structs::custom_error::CustomError;
impl CustomError {
pub fn new(msg: &str) -> CustomError {
CustomError { message: msg.to_string() }
}
}
impl fmt::Display for CustomError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", self.message)
}
}
impl Error for CustomError {
fn description(&self) -> &str {
&self.message
}
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/structs/get_torrents_api.rs | src/common/structs/get_torrents_api.rs | use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Clone)]
pub struct GetTorrentsApi {
pub info_hash: String,
pub completed: i64,
pub seeders: i64,
pub leechers: i64,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/structs/number_of_bytes.rs | src/common/structs/number_of_bytes.rs | #[derive(PartialEq, PartialOrd, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfBytes(pub i64); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/structs/number_of_bytes_def.rs | src/common/structs/number_of_bytes_def.rs | use serde::{Deserialize, Serialize};
use crate::common::structs::number_of_bytes::NumberOfBytes;
#[derive(Serialize, Deserialize)]
#[serde(remote = "NumberOfBytes")]
pub struct NumberOfBytesDef(pub i64); | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/structs/get_torrent_api.rs | src/common/structs/get_torrent_api.rs | use serde::{Deserialize, Serialize};
use serde_json::Value;
#[derive(Serialize, Deserialize, Clone)]
pub struct GetTorrentApi {
pub info_hash: String,
pub completed: i64,
pub seeders: i64,
pub leechers: i64,
pub peers: Vec<Value>,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/common/structs/custom_error.rs | src/common/structs/custom_error.rs | #[derive(Debug)]
pub struct CustomError {
pub(crate) message: String,
} | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/config/structs.rs | src/config/structs.rs | pub mod api_trackers_config;
pub mod configuration;
pub mod database_structure_config;
pub mod http_trackers_config;
pub mod udp_trackers_config;
pub mod database_structure_config_blacklist;
pub mod database_structure_config_keys;
pub mod database_structure_config_torrents;
pub mod database_structure_config_users;
pub mod database_structure_config_whitelist;
pub mod database_config;
pub mod tracker_config;
pub mod sentry_config; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Power2All/torrust-actix | https://github.com/Power2All/torrust-actix/blob/20d91c8be4b979abd1d67602b898a444d48a8945/src/config/enums.rs | src/config/enums.rs | pub mod configuration_error; | rust | MIT | 20d91c8be4b979abd1d67602b898a444d48a8945 | 2026-01-04T20:24:47.160557Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.