repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/invite_rule.rs | crates/server/src/user/invite_rule.rs | use diesel::prelude::*;
use palpo_core::push::PusherIds;
use url::Url;
use crate::core::UnixMillis;
use crate::core::client::push::{PusherAction, PusherPostData};
use crate::core::events::TimelineEventType;
use crate::core::identifiers::*;
use crate::core::push::push_gateway::{
Device, Notification, NotificationCounts, NotificationPriority, SendEventNotificationReqBody,
};
use crate::core::push::{Action, PushFormat, Pusher, PusherKind, Ruleset, Tweak};
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::pusher::NewDbPusher;
use crate::event::PduEvent;
use crate::{AppError, AppResult, AuthedInfo, data, room};
pub fn get_invte_rule(user_id: &UserId) -> AppResult<InviteRule> {
Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/presence.rs | crates/server/src/user/presence.rs | use diesel::prelude::*;
use crate::core::federation::transaction::Edu;
use crate::core::presence::{PresenceContent, PresenceState, PresenceUpdate};
use crate::core::{OwnedServerName, UnixMillis, UserId};
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::{NewDbPresence, last_presence};
use crate::{AppResult, config, data, sending};
/// Resets the presence timeout, so the user will stay in their current presence state.
///
/// Writes a fresh `last_active_at` timestamp unless the stored presence
/// already matches `new_state` and was refreshed within the last minute.
pub fn ping_presence(user_id: &UserId, new_state: &PresenceState) -> AppResult<()> {
    if !config::get().presence.allow_local {
        return Ok(());
    }

    // How long (ms) a matching presence row may go without a refresh before
    // we write it again.
    const REFRESH_TIMEOUT: u64 = 60 * 1000;

    // One lookup instead of three separate matches on the same result. A
    // missing row counts as a state change so the presence gets (re)created.
    // (The previously computed-but-unused `_status_msg` has been dropped.)
    let (state_changed, last_active_ago) = match last_presence(user_id) {
        Err(_) => (true, 0_u64),
        Ok(presence) => (
            presence.content.presence != *new_state,
            presence.content.last_active_ago.unwrap_or_default(),
        ),
    };

    // Nothing to do: same state and refreshed recently enough.
    if !state_changed && last_active_ago < REFRESH_TIMEOUT {
        return Ok(());
    }

    let currently_active = *new_state == PresenceState::Online;
    data::user::set_presence(
        NewDbPresence {
            user_id: user_id.to_owned(),
            stream_id: None,
            state: Some(new_state.to_string()),
            status_msg: None,
            last_active_at: Some(UnixMillis::now()),
            last_federation_update_at: None,
            last_user_sync_at: None,
            currently_active: Some(currently_active),
            occur_sn: None,
        },
        false,
    )?;
    Ok(())
}
/// Adds a presence event which will be saved until a new event replaces it.
///
/// Returns `Ok(true)` when the stored presence actually changed — in which
/// case the update is also broadcast over federation to every server sharing
/// a room with `sender_id` — and `Ok(false)` otherwise.
///
/// Passing `presence_state = None` removes the stored presence instead.
/// `force` is forwarded to the storage layer's change detection.
pub fn set_presence(
    sender_id: &UserId,
    presence_state: Option<PresenceState>,
    status_msg: Option<String>,
    force: bool,
) -> AppResult<bool> {
    // Local presence tracking can be disabled entirely via config.
    if !config::get().presence.allow_local {
        return Ok(false);
    }
    // `None` means "clear my presence", not "set it".
    let Some(presence_state) = presence_state else {
        data::user::remove_presence(sender_id)?;
        return Ok(false);
    };
    let db_presence = NewDbPresence {
        user_id: sender_id.to_owned(),
        stream_id: None,
        state: Some(presence_state.to_string()),
        status_msg: status_msg.clone(),
        last_active_at: None,
        last_federation_update_at: None,
        last_user_sync_at: None,
        currently_active: Some(presence_state == PresenceState::Online),
        occur_sn: None,
    };
    let state_changed = data::user::set_presence(db_presence, force)?;
    if state_changed {
        // Build the federation presence EDU for this single user...
        let edu = Edu::Presence(PresenceContent {
            push: vec![PresenceUpdate {
                user_id: sender_id.to_owned(),
                status_msg,
                last_active_ago: 0,
                currently_active: presence_state == PresenceState::Online,
                presence: presence_state,
            }],
        });
        // ...and send it to every distinct remote server that shares at
        // least one of the sender's joined rooms.
        let joined_rooms = data::user::joined_rooms(sender_id)?;
        let remote_servers = room_joined_servers::table
            .filter(room_joined_servers::room_id.eq_any(joined_rooms))
            .select(room_joined_servers::server_id)
            .distinct()
            .load::<OwnedServerName>(&mut connect()?)?;
        sending::send_edu_servers(remote_servers.into_iter(), &edu)?;
    }
    Ok(state_changed)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/session.rs | crates/server/src/user/session.rs | use std::str::FromStr;
use jsonwebtoken::{Algorithm, DecodingKey, Validation};
use serde::Deserialize;
use crate::config::JwtConfig;
use crate::{AppError, AppResult, MatrixError};
/// Claims deserialized from a decoded JWT login token.
#[derive(Debug, Deserialize)]
pub struct JwtClaims {
    /// `sub` (subject): the authenticated user's identifier — the only claim
    /// this module reads directly (other claims are checked by the validator).
    pub sub: String,
}
/// Decodes and validates `token` against the configured JWT settings,
/// returning its claims on success. Any decode/validation failure is mapped
/// to a not-found Matrix error carrying the underlying cause.
pub fn validate_jwt_token(config: &JwtConfig, token: &str) -> AppResult<JwtClaims> {
    let key = init_jwt_verifier(config)?;
    let validation = init_jwt_validator(config)?;

    match jsonwebtoken::decode::<JwtClaims>(token, &key, &validation) {
        Ok(decoded) => {
            debug!(head = ?decoded.header, claim = ?decoded.claims, "JWT token decoded");
            Ok(decoded.claims)
        }
        Err(e) => Err(MatrixError::not_found(format!("invalid JWT token: {e}")).into()),
    }
}
/// Builds the signature-verification key from the configured secret,
/// interpreting the secret according to `config.format`:
/// raw HMAC bytes, base64-encoded HMAC, or an ECDSA PEM key.
fn init_jwt_verifier(config: &JwtConfig) -> AppResult<DecodingKey> {
    let secret = &config.secret;
    match config.format.as_str() {
        "HMAC" => Ok(DecodingKey::from_secret(secret.as_bytes())),
        "HMACB64" => DecodingKey::from_base64_secret(secret.as_str())
            .map_err(|_e| AppError::public("jwt secret is not valid base64")),
        "ECDSA" => DecodingKey::from_ec_pem(secret.as_bytes())
            .map_err(|_e| AppError::public("jwt key is not valid PEM")),
        _ => Err(AppError::public("jwt secret format is not supported")),
    }
}
/// Builds the jsonwebtoken `Validation` from config: the algorithm, which
/// claims are required, and exp/nbf/aud/iss handling.
fn init_jwt_validator(config: &JwtConfig) -> AppResult<Validation> {
    let alg = config.algorithm.as_str();
    let alg = Algorithm::from_str(alg)
        .map_err(|_e| AppError::public("jwt algorithm is not recognized or configured"))?;
    let mut validator = Validation::new(alg);
    // `sub` is always required: it identifies the user (see JwtClaims).
    let mut required_spec_claims: Vec<_> = ["sub"].into();
    validator.validate_exp = config.validate_exp;
    if config.require_exp {
        required_spec_claims.push("exp");
    }
    validator.validate_nbf = config.validate_nbf;
    if config.require_nbf {
        required_spec_claims.push("nbf");
    }
    // Audience/issuer checks are enabled only when configured non-empty,
    // and then the corresponding claim becomes mandatory.
    if !config.audience.is_empty() {
        required_spec_claims.push("aud");
        validator.set_audience(&config.audience);
    }
    if !config.issuer.is_empty() {
        required_spec_claims.push("iss");
        validator.set_issuer(&config.issuer);
    }
    // Signature checking can only be disabled in debug builds — presumably
    // deliberate so release builds always verify signatures; confirm if a
    // release-mode opt-out is ever required.
    if cfg!(debug_assertions) && !config.validate_signature {
        warn!("JWT signature validation is disabled!");
        validator.insecure_disable_signature_validation();
    }
    validator.set_required_spec_claims(&required_spec_claims);
    debug!(?validator, "JWT configured");
    Ok(validator)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/ldap.rs | crates/server/src/user/ldap.rs | use std::collections::HashMap;
use ldap3::{LdapConnAsync, Scope, SearchEntry};
use crate::core::UserId;
use crate::{AppError, AppResult, MatrixError, config};
pub async fn search_ldap(user_id: &UserId) -> AppResult<Vec<(String, bool)>> {
let localpart = user_id.localpart().to_owned();
let lowercased_localpart = localpart.to_lowercase();
let conf =
config::enabled_ldap().ok_or_else(|| AppError::public("LDAP is not enabled in the configuration"))?;
let uri = conf
.uri
.as_ref()
.ok_or_else(|| AppError::public("LDAP URI is not configured."))?;
debug!(?uri, "LDAP creating connection...");
let (conn, mut ldap) = LdapConnAsync::new(uri.as_str())
.await
.map_err(|e| AppError::public("LDAP connection setup error: {e}"))?;
let driver = tokio::spawn(async move {
match conn.drive().await {
Err(e) => error!("LDAP connection error: {e}"),
Ok(()) => debug!("LDAP connection completed"),
}
});
match (&conf.bind_dn, &conf.bind_password_file) {
(Some(bind_dn), Some(bind_password_file)) => {
let bind_pw = String::from_utf8(std::fs::read(bind_password_file)?)?;
ldap.simple_bind(bind_dn, bind_pw.trim())
.await
.and_then(ldap3::LdapResult::success)
.map_err(|e| AppError::public(format!("LDAP bind error: {e}")))?;
}
(..) => {}
}
let attr = [&conf.uid_attribute, &conf.name_attribute];
let user_filter = &conf.filter.replace("{username}", &lowercased_localpart);
let (entries, _result) = ldap
.search(&conf.base_dn, Scope::Subtree, user_filter, &attr)
.await
.and_then(ldap3::SearchResult::success)
.inspect(|(entries, result)| trace!(?entries, ?result, "LDAP Search"))
.map_err(|e| AppError::public(format!("LDAP search error: {e}")))?;
let mut dns: HashMap<String, bool> = entries
.into_iter()
.filter_map(|entry| {
let search_entry = SearchEntry::construct(entry);
debug!(?search_entry, "LDAP search entry");
search_entry
.attrs
.get(&conf.uid_attribute)
.into_iter()
.chain(search_entry.attrs.get(&conf.name_attribute))
.any(|ids| ids.contains(&localpart) || ids.contains(&lowercased_localpart))
.then_some((search_entry.dn, false))
})
.collect();
if !conf.admin_filter.is_empty() {
let admin_base_dn = if conf.admin_base_dn.is_empty() {
&conf.base_dn
} else {
&conf.admin_base_dn
};
let admin_filter = &conf.admin_filter.replace("{username}", &lowercased_localpart);
let (admin_entries, _result) = ldap
.search(admin_base_dn, Scope::Subtree, admin_filter, &attr)
.await
.and_then(ldap3::SearchResult::success)
.inspect(|(entries, result)| trace!(?entries, ?result, "LDAP Admin Search"))
.map_err(|e| AppError::public(format!("Ldap admin search error: {e}")))?;
dns.extend(admin_entries.into_iter().filter_map(|entry| {
let search_entry = SearchEntry::construct(entry);
debug!(?search_entry, "LDAP search entry");
search_entry
.attrs
.get(&conf.uid_attribute)
.into_iter()
.chain(search_entry.attrs.get(&conf.name_attribute))
.any(|ids| ids.contains(&localpart) || ids.contains(&lowercased_localpart))
.then_some((search_entry.dn, true))
}));
}
ldap.unbind()
.await
.map_err(|e| AppError::public(format!("LDAP unbind error: {e}")))?;
driver.await.ok();
Ok(dns.drain().collect())
}
/// Verifies `password` for the LDAP entry `user_dn` by performing a simple
/// bind; a successful bind means the credentials are valid.
pub async fn auth_ldap(user_dn: &str, password: &str) -> AppResult<()> {
    let conf = config::enabled_ldap()
        .ok_or_else(|| AppError::public("LDAP is not enabled in the configuration"))?;
    let uri = conf
        .uri
        .as_ref()
        // FIX: dropped the no-op `format!` around a constant string and
        // aligned the message with `search_ldap`'s.
        .ok_or_else(|| AppError::public("LDAP URI is not configured."))?;

    debug!(?uri, "LDAP creating connection...");
    let (conn, mut ldap) = LdapConnAsync::new(uri.as_str())
        .await
        .map_err(|e| AppError::public(format!("LDAP connection setup error: {e}")))?;

    // Background task driving connection I/O while we bind.
    let driver = tokio::spawn(async move {
        match conn.drive().await {
            Err(e) => error!("LDAP connection error: {e}"),
            Ok(()) => debug!("LDAP connection completed."),
        }
    });

    // A failed bind maps to 403 forbidden: bad credentials, not a server error.
    ldap.simple_bind(user_dn, password)
        .await
        .and_then(ldap3::LdapResult::success)
        .map_err(|e| MatrixError::forbidden(format!("LDAP authentication error: {e}"), None))?;

    ldap.unbind()
        .await
        .map_err(|e| AppError::public(format!("LDAP unbind error: {e}")))?;
    driver.await.ok();

    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/pusher.rs | crates/server/src/user/pusher.rs | use diesel::prelude::*;
use palpo_core::push::PusherIds;
use url::Url;
use crate::core::UnixMillis;
use crate::core::client::push::{PusherAction, PusherPostData};
use crate::core::events::TimelineEventType;
use crate::core::identifiers::*;
use crate::core::push::push_gateway::{
Device, Notification, NotificationCounts, NotificationPriority, SendEventNotificationReqBody,
};
use crate::core::push::{Action, PushFormat, Pusher, PusherKind, Ruleset, Tweak};
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::pusher::NewDbPusher;
use crate::event::PduEvent;
use crate::{AppError, AppResult, AuthedInfo, data, room};
/// Creates, replaces, or deletes a pusher for the authenticated user.
///
/// `Post` inserts a new pusher row (first deleting any existing pusher with
/// the same user/pushkey/app_id unless `append` is set); `Delete` removes
/// the pusher identified by pushkey and app_id.
pub fn set_pusher(authed: &AuthedInfo, pusher: PusherAction) -> AppResult<()> {
    match pusher {
        PusherAction::Post(data) => {
            // Pull the request payload apart; `kind` carries the pusher's
            // type-specific configuration, serialized to JSON below.
            let PusherPostData {
                pusher:
                    Pusher {
                        ids: PusherIds { app_id, pushkey },
                        kind,
                        app_display_name,
                        device_display_name,
                        lang,
                        profile_tag,
                        ..
                    },
                append,
            } = data;
            // Without `append`, a pusher with the same (user, pushkey,
            // app_id) is replaced rather than added alongside.
            if !append {
                diesel::delete(
                    user_pushers::table
                        .filter(user_pushers::user_id.eq(authed.user_id()))
                        .filter(user_pushers::pushkey.eq(&pushkey))
                        .filter(user_pushers::app_id.eq(&app_id)),
                )
                .execute(&mut connect()?)?;
            }
            diesel::insert_into(user_pushers::table)
                .values(&NewDbPusher {
                    user_id: authed.user_id().to_owned(),
                    profile_tag,
                    kind: kind.name().to_owned(),
                    app_id,
                    app_display_name,
                    device_id: authed.device_id().to_owned(),
                    device_display_name,
                    access_token_id: authed.access_token_id().to_owned(),
                    pushkey,
                    lang,
                    data: kind.json_data()?,
                    enabled: true, // TODO
                    created_at: UnixMillis::now(),
                })
                .execute(&mut connect()?)?;
        }
        // Deletion is keyed on (user, pushkey, app_id).
        PusherAction::Delete(ids) => {
            diesel::delete(
                user_pushers::table
                    .filter(user_pushers::user_id.eq(authed.user_id()))
                    .filter(user_pushers::pushkey.eq(ids.pushkey))
                    .filter(user_pushers::app_id.eq(ids.app_id)),
            )
            .execute(&mut connect()?)?;
        }
    }
    Ok(())
}
// #[tracing::instrument(skip(destination, request))]
// pub async fn send_request<T: OutgoingRequest>(destination: &str, request: T) -> AppResult<T::IncomingResponse>
// where
// T: Debug,
// {
// let destination = destination.replace("/_matrix/push/v1/notify", "");
// let http_request = request
// .try_into_http_request::<BytesMut>(&destination, SendDbAccessToken::IfRequired(""), &[MatrixVersion::V1_0])
// .map_err(|e| {
// warn!("Failed to find destination {}: {}", destination, e);
// AppError::public("Invalid destination")
// })?
// .map(|body| body.freeze());
// let reqwest_request = reqwest::Request::try_from(http_request).expect("all http requests are valid reqwest requests");
// // TODO: we could keep this very short and let expo backoff do it's thing...
// //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5));
// let url = reqwest_request.url().clone();
// let response = crate::default_client().execute(reqwest_request).await;
// match response {
// Ok(mut response) => {
// // reqwest::Response -> http::Response conversion
// let status = response.status();
// let mut http_response_builder = http::Response::builder().status(status).version(response.version());
// mem::swap(
// response.headers_mut(),
// http_response_builder.headers_mut().expect("http::response::Builder is usable"),
// );
// let body = response.bytes().await.unwrap_or_else(|e| {
// warn!("server error {}", e);
// Vec::new().into()
// }); // TODO: handle timeout
// if status != 200 {
// info!(
// "Push gateway returned bad response {} {}\n{}\n{:?}",
// destination,
// status,
// url,
// crate::utils::string_from_bytes(&body)
// );
// }
// let response = T::IncomingResponse::try_from_http_response(http_response_builder.body(body).expect("reqwest body is valid http body"));
// response.map_err(|_| {
// info!("Push gateway returned invalid response bytes {}\n{}", destination, url);
// AppError::public("Push gateway returned bad response.")
// })
// }
// Err(e) => {
// warn!("Could not send request to pusher {}: {}", destination, e);
// Err(e.into())
// }
// }
// }
/// Evaluates the user's push ruleset against `pdu` and, when the resulting
/// action is "notify", forwards a notification to the pusher.
///
/// Returns an internal error when a rule yields more than one notify-class
/// action, which is malformed.
#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu))]
pub async fn send_push_notice(
    user: &UserId,
    unread: u64,
    pusher: &Pusher,
    ruleset: Ruleset,
    pdu: &PduEvent,
) -> AppResult<()> {
    // `notify` stays None until a notify/dont-notify style action is seen;
    // a second such action in the same rule is an error.
    let mut notify = None;
    let mut tweaks = Vec::new();
    let power_levels = room::get_power_levels(&pdu.room_id).await?;
    for action in data::user::pusher::get_actions(
        user,
        &ruleset,
        &power_levels,
        &pdu.to_sync_room_event(),
        &pdu.room_id,
    )
    .await?
    {
        let n = match action {
            Action::Notify => true,
            // Tweaks accumulate and are forwarded with the notification.
            Action::SetTweak(tweak) => {
                tweaks.push(tweak.clone());
                continue;
            }
            _ => false,
        };
        if notify.is_some() {
            return Err(AppError::internal(
                r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#,
            ));
        }
        notify = Some(n);
    }
    if notify == Some(true) {
        send_notice(unread, pusher, tweaks, pdu).await?;
    }
    // Else the event triggered no actions
    Ok(())
}
/// Delivers one notification for `event` to an HTTP pusher; email pushers
/// are not yet handled.
///
/// The payload respects the pusher's `event_id_only` format: minimal
/// (ids + counts) versus full (sender, type, content, names).
#[tracing::instrument(skip_all)]
async fn send_notice(
    unread: u64,
    pusher: &Pusher,
    tweaks: Vec<Tweak>,
    event: &PduEvent,
) -> AppResult<()> {
    // TODO: email
    match &pusher.kind {
        PusherKind::Http(http) => {
            // Two problems with this
            // 1. if "event_id_only" is the only format kind it seems we should never add more info
            // 2. can pusher/devices have conflicting formats
            let event_id_only = http.format == Some(PushFormat::EventIdOnly);

            let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone());
            device.data.default_payload = http.default_payload.clone();
            device.data.format = http.format.clone();

            // Tweaks are only added if the format is NOT event_id_only
            if !event_id_only {
                device.tweaks = tweaks.clone();
            }

            let mut notification = Notification::new(vec![device]);
            notification.prio = NotificationPriority::Low;
            notification.event_id = Some((*event.event_id).to_owned());
            notification.room_id = Some((*event.room_id).to_owned());
            // TODO: missed calls
            notification.counts = NotificationCounts::new(unread, 0);

            // Encrypted events and highlight/sound tweaks bump the priority.
            if event.event_ty == TimelineEventType::RoomEncrypted
                || tweaks
                    .iter()
                    .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_)))
            {
                notification.prio = NotificationPriority::High
            }

            // Full-payload pushers additionally receive event details.
            if !event_id_only {
                notification.sender = Some(event.sender.clone());
                notification.event_type = Some(event.event_ty.clone());
                notification.content = serde_json::value::to_raw_value(&event.content).ok();

                if event.event_ty == TimelineEventType::RoomMember {
                    notification.user_is_target =
                        event.state_key.as_deref() == Some(event.sender.as_str());
                }

                notification.sender_display_name =
                    data::user::display_name(&event.sender).ok().flatten();
                notification.room_name = room::get_name(&event.room_id).ok();
            }

            // The POST itself was duplicated in both branches; issue it once.
            crate::sending::post(Url::parse(&http.url)?)
                .stuff(SendEventNotificationReqBody::new(notification))?
                .send::<()>()
                .await?;

            Ok(())
        }
        // TODO: Handle email
        PusherKind::Email(_) => Ok(()),
        _ => Ok(()),
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/reload.rs | crates/server/src/logging/reload.rs | use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use tracing_subscriber::{EnvFilter, reload};
use crate::AppResult;
/// We need to store a reload::Handle value, but can't name it's type explicitly
/// because the S type parameter depends on the subscriber's previous layers. In
/// our case, this includes unnameable 'impl Trait' types.
///
/// This is fixed[1] in the unreleased tracing-subscriber from the master
/// branch, which removes the S parameter. Unfortunately can't use it without
/// pulling in a version of tracing that's incompatible with the rest of our
/// deps.
///
/// To work around this, we define an trait without the S parameter that
/// forwards to the reload::Handle::reload method, and then store the handle as
/// a trait object.
///
/// [1]: <https://github.com/tokio-rs/tracing/pull/1035/commits/8a87ea52425098d3ef8f56d92358c2f6c144a28f>
pub trait ReloadHandle<L> {
    /// Returns a clone of the currently-installed filter, if any.
    fn current(&self) -> Option<L>;

    /// Swaps in `new_value` as the active filter.
    fn reload(&self, new_value: L) -> Result<(), reload::Error>;
}

// Forward the trait to the concrete reload::Handle, erasing its subscriber
// type parameter `S` (see the module comment above for why this is needed).
impl<L: Clone, S> ReloadHandle<L> for reload::Handle<L, S> {
    fn current(&self) -> Option<L> {
        Self::clone_current(self)
    }

    fn reload(&self, new_value: L) -> Result<(), reload::Error> {
        Self::reload(self, new_value)
    }
}

/// Shared registry of named log-filter reload handles (e.g. "console").
#[derive(Clone)]
pub struct LogLevelReloadHandles {
    handles: Arc<Mutex<HandleMap>>,
}

// Handle name -> type-erased reload handle.
type HandleMap = HashMap<String, Handle>;
type Handle = Box<dyn ReloadHandle<EnvFilter> + Send + Sync>;
impl LogLevelReloadHandles {
    /// Registers a reload handle under `name`, replacing any previous one.
    pub fn add(&self, name: &str, handle: Handle) {
        self.handles
            .lock()
            .expect("locked")
            .insert(name.into(), handle);
    }

    /// Reloads the filter for the named handles.
    ///
    /// NOTE(review): the actual reload call is commented out (EnvFilter is
    /// not Clone), so this is currently a no-op. Also, when `names` is
    /// `None` the `is_some_and` filter matches nothing — presumably
    /// "reload all" was intended; confirm before enabling the reload.
    pub fn reload(&self, _new_value: &EnvFilter, names: Option<&[&str]>) -> AppResult<()> {
        self.handles
            .lock()
            .expect("locked")
            .iter()
            .filter(|(name, _)| names.is_some_and(|names| names.contains(&name.as_str())))
            .for_each(|(_, handle)| {
                // TODO: EnvFilter clone
                // _ = handle.reload(new_value.to_owned());
            });
        Ok(())
    }

    /// Returns a clone of the current filter for `name`, if registered.
    #[must_use]
    pub fn current(&self, name: &str) -> Option<EnvFilter> {
        self.handles
            .lock()
            .expect("locked")
            .get(name)
            .map(|handle| handle.current())?
    }
}
impl Default for LogLevelReloadHandles {
fn default() -> Self {
Self {
handles: Arc::new(HandleMap::new().into()),
}
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/suppress.rs | crates/server/src/logging/suppress.rs | use super::EnvFilter;
/// RAII guard intended to silence console logging for its lifetime: on
/// construction the "console" filter is swapped for an empty one and the
/// previous filter is restored when the guard is dropped.
pub struct Suppress {
    // Filter to reinstall on drop.
    restore: EnvFilter,
}

impl Default for Suppress {
    fn default() -> Self {
        let handle = "console";
        // An empty EnvFilter lets nothing through.
        let suppress = EnvFilter::default();
        let conf = &crate::config::get().logger;
        // Remember what is currently installed — falling back to the
        // configured level when no filter is registered — so Drop can
        // restore it.
        let restore = crate::logging::get()
            .reload
            .current(handle)
            .unwrap_or_else(|| EnvFilter::try_new(&conf.level).unwrap_or_default());
        crate::logging::get()
            .reload
            .reload(&suppress, Some(&[handle]))
            .expect("log filter reloaded");
        Self { restore }
    }
}

impl Suppress {
    /// Convenience constructor; equivalent to `Suppress::default()`.
    pub fn new() -> Self {
        Default::default()
    }
}

impl Drop for Suppress {
    fn drop(&mut self) {
        // Reinstall the filter that was active before suppression began.
        crate::logging::get()
            .reload
            .reload(&self.restore, Some(&["console"]))
            .expect("log filter reloaded");
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/capture.rs | crates/server/src/logging/capture.rs | pub mod data;
mod guard;
pub mod layer;
pub mod state;
pub mod util;
use std::sync::{Arc, Mutex};
pub use data::Data;
use guard::Guard;
pub use layer::{Layer, Value};
pub use state::State;
pub use util::*;
// Predicate deciding whether a capture instance receives a given event.
pub type Filter = dyn Fn(Data<'_>) -> bool + Send + Sync + 'static;
// Callback invoked with each captured event's data.
pub type Closure = dyn FnMut(Data<'_>) + Send + Sync + 'static;

/// Capture instance state.
pub struct Capture {
    // Layer state this capture registers itself with while active.
    state: Arc<State>,
    // Optional event filter; `None` captures everything.
    filter: Option<Box<Filter>>,
    // User callback; a Mutex because the layer may fire from any thread.
    closure: Mutex<Box<Closure>>,
}

impl Capture {
    /// Construct a new capture instance. Capture does not start until the Guard
    /// is in scope.
    #[must_use]
    pub fn new<F, C>(state: &Arc<State>, filter: Option<F>, closure: C) -> Arc<Self>
    where
        F: Fn(Data<'_>) -> bool + Send + Sync + 'static,
        C: FnMut(Data<'_>) + Send + Sync + 'static,
    {
        Arc::new(Self {
            state: state.clone(),
            filter: filter.map(|p| -> Box<Filter> { Box::new(p) }),
            closure: Mutex::new(Box::new(closure)),
        })
    }

    /// Registers this capture with the layer state and returns a guard that
    /// deregisters it on drop.
    #[must_use]
    pub fn start(self: &Arc<Self>) -> Guard {
        self.state.add(self);
        Guard {
            capture: self.clone(),
        }
    }

    /// Deregisters this capture; called by the Guard's Drop impl.
    pub fn stop(self: &Arc<Self>) {
        self.state.del(self);
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/console.rs | crates/server/src/logging/console.rs | use std::{env, io, sync::LazyLock};
use tracing::{
Event, Subscriber,
field::{Field, Visit},
};
use tracing_subscriber::{
field::RecordFields,
fmt,
fmt::{
FmtContext, FormatEvent, FormatFields, MakeWriter,
format::{Compact, DefaultVisitor, Format, Json, Pretty, Writer},
},
registry::LookupSpan,
};
use crate::config::LoggerConfig;
static SYSTEMD_MODE: LazyLock<bool> =
LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok());
/// Console log writer that targets stderr when stdout appears to be
/// connected to the systemd journal, and stdout otherwise.
pub struct ConsoleWriter {
    stdout: io::Stdout,
    stderr: io::Stderr,
    // Pair parsed from $JOURNAL_STREAM; retained but not read — presumably
    // kept for debugging.
    _journal_stream: [u64; 2],
    // Decided once at construction: true when a journal stream was detected.
    use_stderr: bool,
}

impl ConsoleWriter {
    #[must_use]
    pub fn new(_conf: &LoggerConfig) -> Self {
        let journal_stream = get_journal_stream();
        Self {
            stdout: io::stdout(),
            stderr: io::stderr(),
            _journal_stream: journal_stream.into(),
            // Nonzero first component ⇒ journal detected ⇒ write to stderr.
            use_stderr: journal_stream.0 != 0,
        }
    }
}

impl<'a> MakeWriter<'a> for ConsoleWriter {
    type Writer = &'a Self;

    // Hand out a shared reference; the io::Write impl below locks per call.
    fn make_writer(&'a self) -> Self::Writer {
        self
    }
}

impl io::Write for &'_ ConsoleWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if self.use_stderr {
            self.stderr.lock().write(buf)
        } else {
            self.stdout.lock().write(buf)
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        if self.use_stderr {
            self.stderr.lock().flush()
        } else {
            self.stdout.lock().flush()
        }
    }
}
/// Event formatter selected by the logger `format` config value:
/// "json", "compact", or (any other value) pretty.
pub enum ConsoleFormat {
    Compact(Format<Compact>),
    Pretty(Format<Pretty>),
    Json(Format<Json>),
}

impl ConsoleFormat {
    /// Builds the formatter matching `conf.format`; unrecognized values fall
    /// through to the pretty format.
    ///
    /// The three arms are near-identical but cannot easily share a helper:
    /// each builder method changes the concrete `Format<_>` type.
    #[must_use]
    pub fn new(conf: &LoggerConfig) -> Self {
        match &*conf.format {
            "json" => Self::Json(
                fmt::format()
                    .json()
                    .with_ansi(conf.ansi_colors)
                    .with_thread_names(true)
                    .with_thread_ids(true)
                    .with_target(true)
                    .with_file(true)
                    .with_line_number(true)
                    .with_source_location(true),
            ),
            "compact" => Self::Compact(
                fmt::format()
                    .compact()
                    .with_ansi(conf.ansi_colors)
                    .with_thread_names(true)
                    .with_thread_ids(true)
                    .with_target(true)
                    .with_file(true)
                    .with_line_number(true)
                    .with_source_location(true),
            ),
            _ => Self::Pretty(
                fmt::format()
                    .pretty()
                    .with_ansi(conf.ansi_colors)
                    .with_thread_names(true)
                    .with_thread_ids(true)
                    .with_target(true)
                    .with_file(true)
                    .with_line_number(true)
                    .with_source_location(true),
            ),
        }
    }
}
impl<S, N> FormatEvent<S, N> for ConsoleFormat
where
    S: Subscriber + for<'a> LookupSpan<'a>,
    N: for<'a> FormatFields<'a> + 'static,
{
    /// Delegates event formatting to whichever concrete formatter this
    /// instance wraps.
    fn format_event(
        &self,
        ctx: &FmtContext<'_, S, N>,
        writer: Writer<'_>,
        event: &Event<'_>,
    ) -> Result<(), std::fmt::Error> {
        match self {
            Self::Compact(inner) => inner.format_event(ctx, writer, event),
            Self::Pretty(inner) => inner.format_event(ctx, writer, event),
            Self::Json(inner) => inner.format_event(ctx, writer, event),
        }
    }
}
// Field visitor wrapping the default visitor, but skipping fields whose
// names start with an underscore so they never reach console output.
struct ConsoleVisitor<'a> {
    visitor: DefaultVisitor<'a>,
}

impl<'writer> FormatFields<'writer> for ConsoleFormat {
    fn format_fields<R>(&self, writer: Writer<'writer>, fields: R) -> Result<(), std::fmt::Error>
    where
        R: RecordFields,
    {
        let mut visitor = ConsoleVisitor {
            visitor: DefaultVisitor::<'_>::new(writer, true),
        };
        fields.record(&mut visitor);
        // NOTE(review): this always reports Ok — any write error inside the
        // visitor is not surfaced here; confirm that is acceptable.
        Ok(())
    }
}

impl Visit for ConsoleVisitor<'_> {
    fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
        // Convention: underscore-prefixed fields are internal; drop them.
        if field.name().starts_with('_') {
            return;
        }
        self.visitor.record_debug(field, value);
    }
}
/// Parses `$JOURNAL_STREAM` ("device:inode") when running under systemd;
/// returns (0, 0) when not under systemd or on any parse failure.
#[must_use]
fn get_journal_stream() -> (u64, u64) {
    is_systemd_mode()
        .then(|| env::var("JOURNAL_STREAM").ok())
        .flatten()
        .as_deref()
        .and_then(|s| s.split_once(':'))
        .map(|t| {
            (
                str::parse(t.0).unwrap_or_default(),
                str::parse(t.1).unwrap_or_default(),
            )
        })
        .unwrap_or((0, 0))
}

/// True when the process appears to run under systemd with journal output
/// ($SYSTEMD_EXEC_PID and $JOURNAL_STREAM both set); computed once lazily.
#[inline]
#[must_use]
pub fn is_systemd_mode() -> bool {
    *SYSTEMD_MODE
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/color.rs | crates/server/src/logging/color.rs | use super::Level;
/// @returns (Foreground, Background)
#[inline]
#[must_use]
pub fn html(level: &Level) -> (&'static str, &'static str) {
match *level {
Level::TRACE => ("#000000", "#A0A0A0"),
Level::DEBUG => ("#000000", "#FFFFFF"),
Level::ERROR => ("#000000", "#FF0000"),
Level::WARN => ("#000000", "#FFFF00"),
Level::INFO => ("#FFFFFF", "#008E00"),
}
}
/// @returns (Foreground)
#[inline]
#[must_use]
pub fn code_tag(level: &Level) -> &'static str {
match *level {
Level::TRACE => "#888888",
Level::DEBUG => "#C8C8C8",
Level::ERROR => "#FF0000",
Level::WARN => "#FFFF00",
Level::INFO => "#00FF00",
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/fmt_span.rs | crates/server/src/logging/fmt_span.rs | use tracing_subscriber::fmt::format::FmtSpan;
/// Parses a span-event name (case-insensitive) into a `FmtSpan`.
/// Unknown names yield `Err(FmtSpan::NONE)` so callers can fall back.
#[inline]
pub fn from_str(str: &str) -> Result<FmtSpan, FmtSpan> {
    let upper = str.to_uppercase();
    let span = match upper.as_str() {
        "ENTER" => FmtSpan::ENTER,
        "EXIT" => FmtSpan::EXIT,
        "NEW" => FmtSpan::NEW,
        "CLOSE" => FmtSpan::CLOSE,
        "ACTIVE" => FmtSpan::ACTIVE,
        "FULL" => FmtSpan::FULL,
        "NONE" => FmtSpan::NONE,
        _ => return Err(FmtSpan::NONE),
    };
    Ok(span)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/fmt.rs | crates/server/src/logging/fmt.rs | use std::fmt::Write;
use super::{Level, color};
use crate::AppResult;
/// Writes one log record as a Matrix-HTML line: a colored, right-aligned
/// level tag, the span name centered to 12 columns, then the message — all
/// in `<code>` blocks, terminated by `<br>`.
pub fn html<S>(out: &mut S, level: &Level, span: &str, msg: &str) -> AppResult<()>
where
    S: Write + ?Sized,
{
    // Level selects the font color (see color::code_tag).
    let color = color::code_tag(level);
    let level = level.as_str().to_uppercase();
    write!(
        out,
        "<font data-mx-color=\"{color}\"><code>{level:>5}</code></font> <code>{span:^12}</code> \
        <code>{msg}</code><br>"
    )?;
    Ok(())
}
/// Writes one log record as a single markdown line of inline-code spans:
/// right-aligned level, centered span name, then the message.
pub fn markdown<S>(out: &mut S, level: &Level, span: &str, msg: &str) -> AppResult<()>
where
    S: Write + ?Sized,
{
    writeln!(
        out,
        "`{:>5}` `{:^12}` `{}`",
        level.as_str().to_uppercase(),
        span,
        msg
    )?;
    Ok(())
}
/// Writes one log record as a markdown table row (pair with
/// `markdown_table_head` for the header).
pub fn markdown_table<S>(out: &mut S, level: &Level, span: &str, msg: &str) -> AppResult<()>
where
    S: Write + ?Sized,
{
    writeln!(
        out,
        "| {:>5} | {:^12} | {} |",
        level.as_str().to_uppercase(),
        span,
        msg
    )?;
    Ok(())
}
/// Writes the markdown table header matching `markdown_table` rows.
pub fn markdown_table_head<S>(out: &mut S) -> AppResult<()>
where
    S: Write + ?Sized,
{
    // Fixed text, so write it directly rather than going through `write!`.
    out.write_str("| level | span | message |\n| ------: | :-----: | :------- |\n")?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/capture/guard.rs | crates/server/src/logging/capture/guard.rs | use std::sync::Arc;
use super::Capture;
/// Capture instance scope guard.
///
/// Dropping the guard deregisters the capture from the layer state,
/// ending event capture.
pub struct Guard {
    pub(super) capture: Arc<Capture>,
}

impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
        self.capture.stop();
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/capture/state.rs | crates/server/src/logging/capture/state.rs | use std::sync::{Arc, RwLock};
use super::Capture;
/// Capture layer state.
pub struct State {
    // Captures currently receiving events; RwLock because the tracing layer
    // reads concurrently while captures register/deregister.
    pub(super) active: RwLock<Vec<Arc<Capture>>>,
}

impl Default for State {
    fn default() -> Self {
        Self::new()
    }
}

impl State {
    /// Creates an empty state with no active captures.
    #[must_use]
    pub fn new() -> Self {
        Self {
            active: RwLock::new(Vec::new()),
        }
    }

    /// Registers `capture` so it starts receiving events.
    pub(super) fn add(&self, capture: &Arc<Capture>) {
        self.active
            .write()
            .expect("locked for writing")
            .push(capture.clone());
    }

    /// Deregisters `capture`, matched by pointer identity. Uses swap_remove,
    /// so the order of remaining captures is not preserved.
    pub(super) fn del(&self, capture: &Arc<Capture>) {
        let mut vec = self.active.write().expect("locked for writing");
        if let Some(pos) = vec.iter().position(|v| Arc::ptr_eq(v, capture)) {
            vec.swap_remove(pos);
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/capture/layer.rs | crates/server/src/logging/capture/layer.rs | use std::{fmt, sync::Arc};
use tracing::field::{Field, Visit};
use tracing_core::{Event, Subscriber};
use tracing_subscriber::{layer::Context, registry::LookupSpan};
use super::{Capture, Data, State};
/// tracing-subscriber layer that fans events out to active Capture instances.
pub struct Layer {
    state: Arc<State>,
}

// Visitor collecting an event's fields into (name, rendered value) pairs.
struct Visitor {
    values: Values,
}

type Values = Vec<Value>;
pub type Value = (&'static str, String);
// Names of the spans in an event's scope.
type ScopeNames = Vec<&'static str>;

impl Layer {
    #[inline]
    pub fn new(state: &Arc<State>) -> Self {
        Self {
            state: state.clone(),
        }
    }
}

impl fmt::Debug for Layer {
    #[inline]
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The capture list is not Debug; print only the type name.
        formatter.debug_struct("capture::Layer").finish()
    }
}
impl<S> tracing_subscriber::Layer<S> for Layer
where
    S: Subscriber + for<'a> LookupSpan<'a>,
{
    fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) {
        // Offer the event to every active capture whose filter accepts it.
        let active = self.state.active.read().expect("shared lock");
        for capture in active.iter() {
            if filter(self, capture, event, &ctx) {
                handle(self, capture, event, &ctx);
            }
        }
    }
}
/// Record the event's fields and hand them to the capture's closure.
fn handle<S>(layer: &Layer, capture: &Capture, event: &Event<'_>, ctx: &Context<'_, S>)
where
    S: Subscriber + for<'a> LookupSpan<'a>,
{
    let mut visitor = Visitor {
        values: Values::new(),
    };
    event.record(&mut visitor);

    // NOTE(review): span names are only collected in `filter`; the closure
    // always receives an empty scope here — confirm that is intended.
    let names = ScopeNames::new();
    let mut closure = capture.closure.lock().expect("exclusive lock");
    closure(Data {
        layer,
        event,
        current: &ctx.current_span(),
        values: &visitor.values,
        scope: &names,
    });
}
fn filter<S>(layer: &Layer, capture: &Capture, event: &Event<'_>, ctx: &Context<'_, S>) -> bool
where
S: Subscriber + for<'a> LookupSpan<'a>,
{
let values = Values::new();
let mut names = ScopeNames::new();
if let Some(scope) = ctx.event_scope(event) {
for span in scope {
names.push(span.name());
}
}
capture.filter.as_ref().is_none_or(|filter| {
filter(Data {
layer,
event,
current: &ctx.current_span(),
values: &values,
scope: &names,
})
})
}
impl Visit for Visitor {
    // Fallback: render any field through its Debug impl.
    fn record_debug(&mut self, f: &Field, v: &dyn fmt::Debug) {
        let rendered = format!("{v:?}");
        self.values.push((f.name(), rendered));
    }
    // String fields are stored verbatim, without Debug quoting.
    fn record_str(&mut self, f: &Field, v: &str) {
        let owned = v.to_owned();
        self.values.push((f.name(), owned));
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/capture/util.rs | crates/server/src/logging/capture/util.rs | use std::sync::{Arc, Mutex};
use super::{
super::{Level, fmt},
Closure, Data,
};
use crate::AppResult;
/// Build a capture closure that appends HTML-formatted log lines to `out`.
pub fn fmt_html<S>(out: Arc<Mutex<S>>) -> Box<Closure>
where
    S: std::fmt::Write + Send + 'static,
{
    fmt(fmt::html, out)
}
/// Build a capture closure that appends Markdown-formatted log lines to `out`.
pub fn fmt_markdown<S>(out: Arc<Mutex<S>>) -> Box<Closure>
where
    S: std::fmt::Write + Send + 'static,
{
    fmt(fmt::markdown, out)
}
/// Build a capture closure that renders each captured event with `fun` into
/// the shared writer `out`.
pub fn fmt<F, S>(fun: F, out: Arc<Mutex<S>>) -> Box<Closure>
where
    F: Fn(&mut S, &Level, &str, &str) -> AppResult<()> + Send + Sync + Copy + 'static,
    S: std::fmt::Write + Send + 'static,
{
    Box::new(move |data| {
        let mut sink = out.lock().expect("locked");
        call(fun, &mut *sink, &data);
    })
}
/// Format one captured event into `out`; panics if the writer fails.
fn call<F, S>(fun: F, out: &mut S, data: &Data<'_>)
where
    F: Fn(&mut S, &Level, &str, &str) -> AppResult<()>,
    S: std::fmt::Write,
{
    let level = data.level();
    fun(out, &level, data.span_name(), data.message()).expect("log line appended");
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/logging/capture/data.rs | crates/server/src/logging/capture/data.rs | use tracing::Level;
use tracing_core::{Event, span::Current};
use super::{Layer, layer::Value};
/// A view over a single captured tracing event, handed to capture closures
/// and filters.
pub struct Data<'a> {
    pub layer: &'a Layer,
    pub event: &'a Event<'a>,
    // Span that was current when the event fired.
    pub current: &'a Current,
    // Recorded field (name, rendered value) pairs; may be empty.
    pub values: &'a [Value],
    // Names of the spans enclosing the event; may be empty.
    pub scope: &'a [&'static str],
}
impl Data<'_> {
    /// Whether the event came from one of our own crates (module path
    /// starting with "palpo").
    #[must_use]
    pub fn our_modules(&self) -> bool {
        self.mod_name().starts_with("palpo")
    }

    /// Severity of the event.
    #[must_use]
    pub fn level(&self) -> Level {
        *self.event.metadata().level()
    }

    /// Module path that emitted the event, or "" when unavailable.
    #[must_use]
    pub fn mod_name(&self) -> &str {
        match self.event.metadata().module_path() {
            Some(path) => path,
            None => "",
        }
    }

    /// Name of the current span, or "" when outside any span.
    #[must_use]
    pub fn span_name(&self) -> &str {
        match self.current.metadata() {
            Some(meta) => meta.name(),
            None => "",
        }
    }

    /// The event's "message" field, or "" when absent.
    #[must_use]
    pub fn message(&self) -> &str {
        for (name, value) in self.values {
            if *name == "message" {
                return value.as_str();
            }
        }
        ""
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/sending/guard.rs | crates/server/src/sending/guard.rs | use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use futures_util::stream::{FuturesUnordered, StreamExt};
use tokio::sync::{Mutex, mpsc};
use super::{
EduBuf, EduVec, MPSC_RECEIVER, MPSC_SENDER, OutgoingKind, SELECT_EDU_LIMIT,
SELECT_PRESENCE_LIMIT, SELECT_RECEIPT_LIMIT, SendingEventType, TransactionStatus,
};
use crate::core::device::DeviceListUpdateContent;
use crate::core::events::receipt::{ReceiptContent, ReceiptData, ReceiptMap, ReceiptType};
use crate::core::federation::transaction::Edu;
use crate::core::identifiers::*;
use crate::core::presence::{PresenceContent, PresenceUpdate};
use crate::core::{Seqnum, device_id};
use crate::room::state;
use crate::{AppResult, data, exts::*, room};
pub fn start() {
let (sender, receiver) = mpsc::unbounded_channel();
let _ = MPSC_SENDER.set(sender);
let _ = MPSC_RECEIVER.set(Mutex::new(receiver));
tokio::spawn(async move {
process().await.unwrap();
});
}
/// Core loop of the background sending task.
///
/// On startup, re-activates transactions that were in flight before a
/// restart (dropping oversized per-destination backlogs), then concurrently:
/// - harvests finished `send_events` futures, chaining newly queued events
///   for the same destination or recording the failure for backoff;
/// - accepts new (kind, event, id) triples from the MPSC channel and starts
///   a transaction unless one is already running for that destination.
async fn process() -> AppResult<()> {
    let mut receiver = MPSC_RECEIVER
        .get()
        .expect("receiver should exist")
        .lock()
        .await;
    let mut futures = FuturesUnordered::new();
    let mut current_transaction_status = HashMap::<OutgoingKind, TransactionStatus>::new();
    // Retry requests we could not finish yet
    let mut initial_transactions = HashMap::<OutgoingKind, Vec<SendingEventType>>::new();
    for (id, outgoing_kind, event) in super::active_requests()? {
        let entry = initial_transactions
            .entry(outgoing_kind.clone())
            .or_default();
        // Cap the per-destination backlog; overflow events are deleted, not retried.
        if entry.len() > 30 {
            warn!(
                "Dropping some current events: {:?} {:?} {:?}",
                id, outgoing_kind, event
            );
            super::delete_request(id)?;
            continue;
        }
        entry.push(event);
    }
    for (outgoing_kind, events) in initial_transactions {
        current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running);
        futures.push(super::send_events(outgoing_kind.clone(), events));
    }
    loop {
        tokio::select! {
            Some(response) = futures.next() => {
                match response {
                    Ok(outgoing_kind) => {
                        super::delete_all_active_requests_for(&outgoing_kind)?;
                        // Find events that have been added since starting the last request
                        let new_events = super::queued_requests(&outgoing_kind).unwrap_or_default().into_iter().take(30).collect::<Vec<_>>();
                        if !new_events.is_empty() {
                            // Insert pdus we found
                            super::mark_as_active(&new_events)?;
                            futures.push(
                                super::send_events(
                                    outgoing_kind.clone(),
                                    new_events.into_iter().map(|(_, event)| event).collect(),
                                )
                            );
                        } else {
                            // Destination is idle; drop its status so the next
                            // incoming event starts a fresh transaction.
                            current_transaction_status.remove(&outgoing_kind);
                        }
                    }
                    Err((outgoing_kind, event)) => {
                        error!("failed to send event: {event:?} outgoing_kind:{outgoing_kind:?}");
                        // Record the failure so `select_events` applies
                        // exponential backoff on the next attempt.
                        current_transaction_status.entry(outgoing_kind).and_modify(|e| *e = match e {
                            TransactionStatus::Running => {
                                TransactionStatus::Failed(1, Instant::now())
                            },
                            TransactionStatus::Retrying(n) => {
                                TransactionStatus::Failed(n.saturating_add(1), Instant::now())
                            },
                            TransactionStatus::Failed(_, _) => {
                                error!("Request that was not even running failed?!");
                                return
                            },
                        });
                    }
                };
            },
            Some((outgoing_kind, event, id)) = receiver.recv() => {
                if let Ok(Some(events)) = select_events(
                    &outgoing_kind,
                    vec![(id, event)],
                    &mut current_transaction_status,
                ) {
                    futures.push(super::send_events(outgoing_kind, events));
                }
            }
        }
    }
}
/// Decide which events (if any) to send to `outgoing_kind` now.
///
/// Returns `Ok(None)` when a transaction is already running for this
/// destination or it is still inside its backoff window. On a retry the
/// previously active events are re-sent; otherwise `new_events` are marked
/// active and, for federation destinations, pending EDUs are appended.
#[tracing::instrument(skip_all)]
fn select_events(
    outgoing_kind: &OutgoingKind,
    new_events: Vec<(i64, SendingEventType)>, // Events we want to send: event and full key
    current_transaction_status: &mut HashMap<OutgoingKind, TransactionStatus>,
) -> AppResult<Option<Vec<SendingEventType>>> {
    let mut retry = false;
    let mut allow = true;
    let entry = current_transaction_status.entry(outgoing_kind.clone());
    entry
        .and_modify(|e| match e {
            TransactionStatus::Running | TransactionStatus::Retrying(_) => {
                allow = false; // already running
            }
            TransactionStatus::Failed(tries, time) => {
                // Fail if a request has failed recently (exponential backoff)
                // Window is 30s * tries^2, capped at 24 hours.
                let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
                if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
                    min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
                }
                if time.elapsed() < min_elapsed_duration {
                    allow = false;
                } else {
                    retry = true;
                    *e = TransactionStatus::Retrying(*tries);
                }
            }
        })
        .or_insert(TransactionStatus::Running);
    if !allow {
        return Ok(None);
    }
    let mut events = Vec::new();
    if retry {
        // We retry the previous transaction
        for (_, e) in super::active_requests_for(outgoing_kind)? {
            events.push(e);
        }
    } else {
        super::mark_as_active(&new_events)?;
        for (_, e) in new_events {
            events.push(e);
        }
        // Piggy-back pending EDUs onto federation transactions.
        if let OutgoingKind::Normal(server_name) = outgoing_kind
            && let Ok((select_edus, _last_count)) = select_edus(server_name)
        {
            events.extend(select_edus.into_iter().map(SendingEventType::Edu));
        }
    }
    Ok(Some(events))
}
/// Collect device-list-update EDUs for local users whose keys changed since
/// `since_sn` in any room shared with `server_name`, respecting the shared
/// EDU budget tracked in `events_len`.
#[tracing::instrument(level = "trace", skip(server_name))]
fn select_edus_device_changes(
    server_name: &ServerName,
    since_sn: Seqnum,
    _max_edu_sn: &Seqnum,
    events_len: &AtomicUsize,
) -> AppResult<EduVec> {
    let mut edus = EduVec::new();
    let mut seen = HashSet::<OwnedUserId>::new();
    for room_id in state::server_joined_rooms(server_name)? {
        let local_changed = room::keys_changed_users(&room_id, since_sn, None)?
            .into_iter()
            .filter(|user_id| user_id.is_local());
        for user_id in local_changed {
            // max_edu_sn.fetch_max(event_sn, Ordering::Relaxed);
            // Emit at most one update per user across all shared rooms.
            if !seen.insert(user_id.clone()) {
                continue;
            }
            // An empty prev_id list forces the remote side (e.g. synapse) to
            // resync, so placeholder device data is sufficient here.
            let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
                user_id,
                device_id: device_id!("placeholder").to_owned(),
                device_display_name: Some("Placeholder".to_owned()),
                stream_id: 1,
                prev_id: Vec::new(),
                deleted: None,
                keys: None,
            });
            let mut buf = EduBuf::new();
            serde_json::to_writer(&mut buf, &edu)
                .expect("failed to serialize device list update to JSON");
            edus.push(buf);
            // Stop once the shared EDU budget is exhausted.
            if events_len.fetch_add(1, Ordering::Relaxed) >= SELECT_EDU_LIMIT - 1 {
                return Ok(edus);
            }
        }
    }
    Ok(edus)
}
/// Gather read receipts newer than `since_sn` for all rooms shared with
/// `server_name`, bundled into a single Receipt EDU.
///
/// Returns `Ok(None)` when there is nothing to send.
#[tracing::instrument(level = "trace", skip(server_name, max_edu_sn))]
fn select_edus_receipts(
    server_name: &ServerName,
    since_sn: Seqnum,
    max_edu_sn: &Seqnum,
) -> AppResult<Option<EduBuf>> {
    // `num` is shared across rooms so the receipt count is capped globally.
    let mut num = 0;
    let receipts: BTreeMap<OwnedRoomId, ReceiptMap> = state::server_joined_rooms(server_name)?
        .into_iter()
        .filter_map(|room_id| {
            let receipt_map =
                select_edus_receipts_room(&room_id, since_sn, max_edu_sn, &mut num).ok()?;
            // Skip rooms with nothing to report. (Replaces the obfuscated
            // `.is_empty().eq(&false).then_some(...)` construction.)
            if receipt_map.read.is_empty() {
                None
            } else {
                Some((room_id, receipt_map))
            }
        })
        .collect();
    if receipts.is_empty() {
        return Ok(None);
    }
    let receipt_content = Edu::Receipt(ReceiptContent::new(receipts));
    let mut buf = EduBuf::new();
    serde_json::to_writer(&mut buf, &receipt_content)
        .expect("Failed to serialize Receipt EDU to JSON vec");
    Ok(Some(buf))
}
/// Collect read receipts newer than `since_sn` in `room_id`, keyed by local
/// user.
///
/// `num` is a running total shared across rooms; collection stops once it
/// reaches `SELECT_RECEIPT_LIMIT`.
#[tracing::instrument(level = "trace", skip(since_sn))]
fn select_edus_receipts_room(
    room_id: &RoomId,
    since_sn: Seqnum,
    _max_edu_sn: &Seqnum,
    num: &mut usize,
) -> AppResult<ReceiptMap> {
    let receipts = data::room::receipt::read_receipts(room_id, since_sn)?;
    let mut read = BTreeMap::<OwnedUserId, ReceiptData>::new();
    for (user_id, read_receipt) in receipts {
        // if count > since_sn {
        //     break;
        // }
        // max_edu_sn.fetch_max(occur_sn, Ordering::Relaxed);
        // Only receipts from our own users are forwarded over federation.
        if !user_id.is_local() {
            continue;
        }
        // let Ok(event) = serde_json::from_str(read_receipt.inner().get()) else {
        //     error!(
        //         ?user_id,
        //         ?read_receipt,
        //         "Invalid edu event in read_receipts."
        //     );
        //     continue;
        // };
        // let AnySyncEphemeralRoomEvent::Receipt(r) = event else {
        //     error!(?user_id, ?event, "Invalid event type in read_receipts");
        //     continue;
        // };
        // Local read receipts are stored with exactly one event entry, and
        // always contain a `Read` receipt for the receipting user — the
        // expects below encode those storage invariants.
        let (event_id, mut receipt) = read_receipt
            .0
            .into_iter()
            .next()
            .expect("we only use one event per read receipt");
        let receipt = receipt
            .remove(&ReceiptType::Read)
            .expect("our read receipts always set this")
            .remove(&user_id)
            .expect("our read receipts always have the user here");
        let receipt_data = ReceiptData {
            data: receipt,
            event_ids: vec![event_id.clone()],
        };
        // Only first insertions per user count toward the global limit.
        if read.insert(user_id.to_owned(), receipt_data).is_none() {
            *num = num.saturating_add(1);
            if *num >= SELECT_RECEIPT_LIMIT {
                break;
            }
        }
    }
    Ok(ReceiptMap { read })
}
/// Collect presence updates newer than `since_sn` for local users that are
/// visible to `server_name`, bundled into a single Presence EDU.
///
/// Returns `Ok(None)` when there is nothing to send.
#[tracing::instrument(level = "trace", skip(server_name))]
fn select_edus_presence(
    server_name: &ServerName,
    since_sn: Seqnum,
    _max_edu_sn: &Seqnum,
) -> AppResult<Option<EduBuf>> {
    let mut updates = HashMap::<OwnedUserId, PresenceUpdate>::new();
    for (user_id, presence_event) in crate::data::user::presences_since(since_sn)? {
        // max_edu_sn.fetch_max(occur_sn, Ordering::Relaxed);
        // Only forward presence of our own users.
        if !user_id.is_local() {
            continue;
        }
        // Respect visibility: skip users the remote server may not see.
        if !state::server_can_see_user(server_name, &user_id)? {
            continue;
        }
        let content = presence_event.content;
        let update = PresenceUpdate {
            user_id: user_id.clone(),
            presence: content.presence,
            currently_active: content.currently_active.unwrap_or(false),
            status_msg: content.status_msg,
            last_active_ago: content.last_active_ago.unwrap_or(0),
        };
        updates.insert(user_id, update);
        if updates.len() >= SELECT_PRESENCE_LIMIT {
            break;
        }
    }
    if updates.is_empty() {
        return Ok(None);
    }
    let presence_content = Edu::Presence(PresenceContent {
        push: updates.into_values().collect(),
    });
    let mut buf = EduBuf::new();
    serde_json::to_writer(&mut buf, &presence_content)
        .expect("failed to serialize Presence EDU to JSON");
    Ok(Some(buf))
}
/// Assemble the EDUs (device-list updates, read receipts, presence) to send
/// to `server_name`, returning them with the sequence number bound used.
#[tracing::instrument(skip(server_name))]
pub fn select_edus(server_name: &ServerName) -> AppResult<(EduVec, i64)> {
    // NOTE(review): both bounds read the current sequence number, so the
    // "since" window effectively starts at now — confirm this is intended.
    let max_edu_sn = data::curr_sn()?;
    let since_sn = data::curr_sn()?;
    let conf = crate::config::get();
    let events_len = AtomicUsize::default();
    let mut edus = select_edus_device_changes(server_name, since_sn, &max_edu_sn, &events_len)?;
    if conf.read_receipt.allow_outgoing
        && let Some(receipts) = select_edus_receipts(server_name, since_sn, &max_edu_sn)?
    {
        edus.push(receipts);
    }
    if conf.presence.allow_outgoing
        && let Some(presence) = select_edus_presence(server_name, since_sn, &max_edu_sn)?
    {
        edus.push(presence);
    }
    Ok((edus, max_edu_sn))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/sending/dest.rs | crates/server/src/sending/dest.rs | use std::fmt::Debug;
use crate::core::{OwnedServerName, OwnedUserId};
/// Where an outgoing request is delivered; also used to derive the queue-key
/// prefix (see `get_prefix`).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Destination {
    Appservice(String),
    Push(OwnedUserId, String), // user and pushkey
    Federation(OwnedServerName),
}
impl Destination {
    /// Byte prefix identifying this destination in the request queue.
    ///
    /// Layout: an optional sigil (`+` for appservices, `$` for pushers),
    /// then each component terminated by a `0xFF` separator.
    #[must_use]
    pub(super) fn get_prefix(&self) -> Vec<u8> {
        let (sigil, parts): (&[u8], Vec<&[u8]>) = match self {
            Self::Federation(server) => (b"", vec![server.as_bytes()]),
            Self::Appservice(id) => (b"+", vec![id.as_bytes()]),
            Self::Push(user, pushkey) => (b"$", vec![user.as_bytes(), pushkey.as_bytes()]),
        };
        let payload: usize = parts.iter().map(|p| p.len().saturating_add(1)).sum();
        let mut prefix = Vec::with_capacity(sigil.len().saturating_add(payload));
        prefix.extend_from_slice(sigil);
        for part in parts {
            prefix.extend_from_slice(part);
            prefix.push(0xFF);
        }
        prefix
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/sending/resolver.rs | crates/server/src/sending/resolver.rs | use std::error::Error as StdError;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::{future, iter};
use futures_util::FutureExt;
use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName};
use reqwest::dns::{Addrs, Name, Resolve, Resolving};
use tower_service::Service as TowerService;
use crate::TlsNameMap;
// Lengths for generated identifiers/secrets.
// NOTE(review): these constants look unrelated to the resolver below —
// consider relocating them to a more general module.
pub const MXC_LENGTH: usize = 32;
pub const DEVICE_ID_LENGTH: usize = 10;
pub const TOKEN_LENGTH: usize = 32;
pub const SESSION_ID_LENGTH: usize = 32;
pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
pub const RANDOM_USER_ID_LENGTH: usize = 10;
/// DNS resolver that consults a shared override map before falling back to
/// the system resolver.
pub struct Resolver {
    // System (getaddrinfo-based) resolver used when no override matches.
    inner: GaiResolver,
    // Per-host pins: name -> (addresses, port).
    overrides: Arc<RwLock<TlsNameMap>>,
}
impl Resolver {
    /// Create a resolver with the given override map.
    pub fn new(overrides: Arc<RwLock<TlsNameMap>>) -> Self {
        Resolver {
            inner: GaiResolver::new(),
            overrides,
        }
    }
}
impl Resolve for Resolver {
fn resolve(&self, name: Name) -> Resolving {
self.overrides
.read()
.unwrap()
.get(name.as_str())
.and_then(|(override_name, port)| {
override_name.first().map(|first_name| {
let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
Box::new(iter::once(SocketAddr::new(*first_name, *port)));
let x: Resolving = Box::pin(future::ready(Ok(x)));
x
})
})
.unwrap_or_else(|| {
let this = &mut self.inner.clone();
Box::pin(
TowerService::<HyperName>::call(
this,
// Beautiful hack, please remove this in the future.
HyperName::from_str(name.as_str())
.expect("reqwest Name is just wrapper for hyper-util Name"),
)
.map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}),
)
})
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/membership/banned.rs | crates/server/src/membership/banned.rs | use salvo::conn::SocketAddr;
use crate::core::events::room::message::RoomMessageEventContent;
use crate::core::identifiers::*;
use crate::{AppResult, MatrixError, data};
/// Checks if the room is banned in any way possible and the sender user is not an admin.
///
/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is enabled.
///
/// # Errors
///
/// Returns a forbidden error when the room (or its server, or the given
/// remote server) is banned on this homeserver.
#[tracing::instrument]
pub async fn banned_room_check(
    user_id: &UserId,
    room_id: Option<&RoomId>,
    server_name: Option<&ServerName>,
    client_addr: &SocketAddr,
) -> AppResult<()> {
    // Admins bypass all ban checks.
    if data::user::is_admin(user_id)? {
        return Ok(());
    }
    let conf = crate::config::get();
    if let Some(room_id) = room_id {
        // Banned either as a disabled room, or via its server's host matching
        // the forbidden-server pattern. (Single evaluation of server_name()
        // instead of the previous is_ok() + expect() double call.)
        let server_is_banned = room_id
            .server_name()
            .map(|name| conf.forbidden_remote_server_names.is_match(name.host()))
            .unwrap_or(false);
        if data::room::is_disabled(room_id)? || server_is_banned {
            warn!(
                "User {user_id} who is not an admin attempted to send an invite for or \
                attempted to join a banned room or banned room server name: {room_id}"
            );
            if conf.auto_deactivate_banned_room_attempts {
                auto_deactivate(user_id, client_addr, conf.admin.room_notices).await?;
            }
            return Err(
                MatrixError::forbidden("This room is banned on this homeserver.", None).into(),
            );
        }
    } else if let Some(server_name) = server_name
        && conf
            .forbidden_remote_server_names
            .is_match(server_name.host())
    {
        warn!(
            "User {user_id} who is not an admin tried joining a room which has the server \
            name {server_name} that is globally forbidden. Rejecting.",
        );
        if conf.auto_deactivate_banned_room_attempts {
            auto_deactivate(user_id, client_addr, conf.admin.room_notices).await?;
        }
        return Err(MatrixError::forbidden(
            "This remote server is banned on this homeserver.",
            None,
        )
        .into());
    }
    Ok(())
}

/// Deactivate `user_id` after a banned-room join attempt, optionally posting
/// an admin-room notice first. Shared by both branches of `banned_room_check`
/// (previously duplicated inline).
async fn auto_deactivate(
    user_id: &UserId,
    client_addr: &SocketAddr,
    room_notices: bool,
) -> AppResult<()> {
    warn!("Automatically deactivating user {user_id} due to attempted banned room join");
    if room_notices {
        // Best-effort notice; failure to post must not block deactivation.
        crate::admin::send_message(RoomMessageEventContent::text_plain(format!(
            "Automatically deactivating user {user_id} due to attempted banned \
            room join from IP {client_addr}"
        )))
        .await
        .ok();
    }
    let all_joined_rooms: Vec<OwnedRoomId> = data::user::joined_rooms(user_id)?;
    crate::user::full_user_deactivate(user_id, &all_joined_rooms).await?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/membership/forget.rs | crates/server/src/membership/forget.rs | use diesel::prelude::*;
use crate::core::identifiers::*;
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::{AppResult, MatrixError};
/// Makes a user forget a room.
///
/// Fails if the user is still joined; otherwise marks the membership row as
/// forgotten.
///
/// NOTE(review): the check and the update are not executed in one
/// transaction, so a concurrent re-join between them is not detected.
#[tracing::instrument]
pub fn forget_room(user_id: &UserId, room_id: &RoomId) -> AppResult<()> {
    // Reuse one pooled connection for both the check and the update
    // (previously two separate connections were checked out).
    let mut conn = connect()?;
    let still_joined = diesel_exists!(
        room_users::table
            .filter(room_users::user_id.eq(user_id))
            .filter(room_users::room_id.eq(room_id))
            .filter(room_users::membership.eq("join")),
        &mut conn
    )?;
    if still_joined {
        return Err(MatrixError::unknown("The user has not left the room.").into());
    }
    diesel::update(
        room_users::table
            .filter(room_users::user_id.eq(user_id))
            .filter(room_users::room_id.eq(room_id)),
    )
    .set(room_users::forgotten.eq(true))
    .execute(&mut conn)?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/membership/join.rs | crates/server/src/membership/join.rs |
use std::collections::{BTreeMap, HashMap};
use diesel::prelude::*;
use indexmap::IndexMap;
use palpo_core::serde::JsonValue;
use palpo_data::user::DbUser;
use salvo::http::StatusError;
use tokio::sync::RwLock;
use crate::appservice::RegistrationInfo;
use crate::core::UnixMillis;
use crate::core::client::membership::{JoinRoomResBody, ThirdPartySigned};
use crate::core::device::DeviceListUpdateContent;
use crate::core::events::TimelineEventType;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::federation::membership::{
MakeJoinReqArgs, MakeJoinResBody, SendJoinArgs, SendJoinReqBody, SendJoinResBodyV2,
};
use crate::core::federation::transaction::Edu;
use crate::core::identifiers::*;
use crate::core::serde::{
CanonicalJsonObject, CanonicalJsonValue, to_canonical_value, to_raw_json_value,
};
use crate::data::room::{DbEventData, NewDbEvent};
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::event::handler::process_incoming_pdu;
use crate::event::{
PduBuilder, PduEvent, ensure_event_sn, gen_event_id_canonical_json, parse_fetched_pdu,
};
use crate::federation::maybe_strip_event_id;
use crate::room::{state, timeline};
use crate::sending::send_edu_server;
use crate::{
AppError, AppResult, GetUrlOrigin, IsRemoteOrLocal, MatrixError, OptionalExtension, SnPduEvent,
config, data, room, sending,
};
pub async fn join_room(
sender: &DbUser,
device_id: Option<&DeviceId>,
room_id: &RoomId,
reason: Option<String>,
servers: &[OwnedServerName],
_third_party_signed: Option<&ThirdPartySigned>,
appservice: Option<&RegistrationInfo>,
extra_data: BTreeMap<String, JsonValue>,
) -> AppResult<JoinRoomResBody> {
if sender.is_guest && appservice.is_none() && !room::guest_can_join(room_id) {
return Err(
MatrixError::forbidden("guests are not allowed to join this room", None).into(),
);
}
let sender_id = &sender.id;
if room::user::is_joined(sender_id, room_id)? {
return Ok(JoinRoomResBody {
room_id: room_id.into(),
});
}
if let Ok(membership) = room::get_member(room_id, sender_id, None)
&& membership.membership == MembershipState::Ban
{
tracing::warn!(
"{} is banned from {room_id} but attempted to join",
sender_id
);
return Err(MatrixError::forbidden("you are banned from the room", None).into());
}
// Ask a remote server if we are not participating in this room
let (should_remote, servers) =
room::should_join_on_remote_servers(sender_id, room_id, servers).await?;
if !should_remote {
info!("we can join locally");
let join_rule = room::get_join_rule(room_id)?;
let event = RoomMemberEventContent {
membership: MembershipState::Join,
display_name: data::user::display_name(sender_id).ok().flatten(),
avatar_url: data::user::avatar_url(sender_id).ok().flatten(),
is_direct: None,
third_party_invite: None,
blurhash: data::user::blurhash(sender_id).ok().flatten(),
reason: reason.clone(),
join_authorized_via_users_server: get_first_user_can_issue_invite(
room_id,
sender_id,
&join_rule.restriction_rooms(),
)
.await
.ok(),
extra_data: extra_data.clone(),
};
match timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_json_value(&event).expect("event is valid, we just created it"),
state_key: Some(sender_id.to_string()),
..Default::default()
},
sender_id,
room_id,
&crate::room::get_version(room_id)?,
&room::lock_state(room_id).await,
)
.await
{
Ok(pdu) => {
if let Some(device_id) = device_id {
crate::user::mark_device_key_update_with_joined_rooms(
sender_id,
device_id,
&[room_id.to_owned()],
)?;
}
if let Err(e) = sending::send_pdu_room(room_id, &pdu.event_id, &[], &[]) {
error!("failed to notify banned user server: {e}");
}
return Ok(JoinRoomResBody::new(room_id.to_owned()));
}
Err(e) => {
tracing::error!("failed to append join event locally: {e}");
if servers.is_empty() || servers.iter().all(|s| s.is_local()) {
return Err(e);
}
}
}
}
info!("joining {room_id} over federation");
let (make_join_response, remote_server) =
make_join_request(sender_id, room_id, &servers).await?;
info!("make join finished");
let room_version = match make_join_response.room_version {
Some(room_version) if config::supported_room_versions().contains(&room_version) => {
room_version
}
_ => return Err(AppError::public("room version is not supported")),
};
let mut join_event_stub: CanonicalJsonObject =
serde_json::from_str(make_join_response.event.get())
.map_err(|_| AppError::public("invalid make_join event json received from server"))?;
let join_authorized_via_users_server = join_event_stub
.get("content")
.map(|s| {
s.as_object()?
.get("join_authorised_via_users_server")?
.as_str()
})
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());
// TODO: Is origin needed?
join_event_stub.insert(
"origin".to_owned(),
CanonicalJsonValue::String(config::get().server_name.as_str().to_owned()),
);
if !join_event_stub.contains_key("origin_server_ts") {
join_event_stub.insert(
"origin_server_ts".to_owned(),
CanonicalJsonValue::Integer(UnixMillis::now().get() as i64),
);
}
join_event_stub.insert(
"content".to_owned(),
to_canonical_value(RoomMemberEventContent {
membership: MembershipState::Join,
display_name: data::user::display_name(sender_id)?,
avatar_url: data::user::avatar_url(sender_id)?,
is_direct: None,
third_party_invite: None,
blurhash: data::user::blurhash(sender_id)?,
reason,
join_authorized_via_users_server,
extra_data: extra_data.clone(),
})
.expect("event is valid, we just created it"),
);
// We keep the "event_id" in the pdu only in v1 or v2 rooms
maybe_strip_event_id(&mut join_event_stub, &room_version);
// In order to create a compatible ref hash (EventID) the `hashes` field needs to be present
crate::server_key::hash_and_sign_event(&mut join_event_stub, &room_version)
.expect("event is valid, we just created it");
// Generate event id
let event_id = crate::event::gen_event_id(&join_event_stub, &room_version)?;
// Add event_id back
join_event_stub.insert(
"event_id".to_owned(),
CanonicalJsonValue::String(event_id.as_str().to_owned()),
);
// It has enough fields to be called a proper event now
let mut join_event = join_event_stub;
let body = SendJoinReqBody(crate::sending::convert_to_outgoing_federation_event(
join_event.clone(),
));
info!("asking {remote_server} for send_join");
let send_join_request = crate::core::federation::membership::send_join_request(
&remote_server.origin().await,
SendJoinArgs {
room_id: room_id.to_owned(),
event_id: event_id.to_owned(),
omit_members: false,
},
body,
)?
.into_inner();
let send_join_body =
crate::sending::send_federation_request(&remote_server, send_join_request, None)
.await?
.json::<SendJoinResBodyV2>()
.await?;
info!("send_join finished");
if let Some(signed_raw) = &send_join_body.0.event {
info!(
"there is a signed event. this room is probably using restricted joins. adding signature to our event"
);
let (signed_event_id, signed_value) =
match gen_event_id_canonical_json(signed_raw, &room_version) {
Ok(t) => t,
Err(_) => {
// Event could not be converted to canonical json
return Err(MatrixError::invalid_param(
"could not convert event to canonical json",
)
.into());
}
};
if signed_event_id != event_id {
return Err(MatrixError::invalid_param("server sent event with wrong event id").into());
}
match signed_value["signatures"]
.as_object()
.ok_or(MatrixError::invalid_param(
"server sent invalid signatures type",
))
.and_then(|e| {
e.get(remote_server.as_str())
.ok_or(MatrixError::invalid_param(
"server did not send its signature",
))
}) {
Ok(signature) => {
join_event
.get_mut("signatures")
.expect("we created a valid pdu")
.as_object_mut()
.expect("we created a valid pdu")
.insert(remote_server.to_string(), signature.clone());
}
Err(e) => {
warn!(
"server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}",
);
}
}
}
room::ensure_room(room_id, &room_version)?;
let parsed_join_pdu = PduEvent::from_canonical_object(room_id, &event_id, join_event.clone())
.map_err(|e| {
warn!("invalid pdu in send_join response: {}", e);
AppError::public("invalid join event pdu")
})?;
let join_event_id = parsed_join_pdu.event_id.clone();
let (join_event_sn, event_guard) = ensure_event_sn(room_id, &join_event_id)?;
let mut state = HashMap::new();
let pub_key_map = RwLock::new(BTreeMap::new());
info!("acquiring server signing keys for response events");
let resp_events = &send_join_body.0;
let resp_state = &resp_events.state;
let resp_auth = &resp_events.auth_chain;
crate::server_key::acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter())).await;
super::update_membership(
&join_event_id,
join_event_sn,
room_id,
sender_id,
MembershipState::Join,
sender_id,
None,
)?;
let mut parsed_pdus = IndexMap::new();
for auth_pdu in resp_auth {
let (event_id, event_value) = parse_fetched_pdu(room_id, &room_version, auth_pdu)?;
parsed_pdus.insert(event_id, event_value);
}
for state in resp_state {
let (event_id, event_value) = parse_fetched_pdu(room_id, &room_version, state)?;
parsed_pdus.insert(event_id, event_value);
}
for (event_id, event_value) in parsed_pdus {
if let Err(e) = process_incoming_pdu(
&remote_server,
&event_id,
room_id,
&room_version,
event_value,
true,
false,
)
.await
{
error!("failed to process incoming events for join: {e}");
}
}
info!("going through send_join response room_state");
for result in send_join_body
.0
.state
.iter()
.map(|pdu| super::validate_and_add_event_id(pdu, &room_version, &pub_key_map))
{
let (event_id, value) = match result.await {
Ok(t) => t,
Err(_) => continue,
};
let pdu = if let Some(pdu) = timeline::get_pdu(&event_id).optional()? {
pdu
} else {
let (event_sn, event_guard) = ensure_event_sn(room_id, &event_id)?;
let pdu = SnPduEvent::from_canonical_object(
room_id,
&event_id,
event_sn,
value.clone(),
false,
false,
false,
)
.map_err(|e| {
warn!("invalid pdu in send_join response: {} {:?}", e, value);
AppError::public("invalid pdu in send_join response.")
})?;
NewDbEvent::from_canonical_json(&event_id, event_sn, &value, false)?.save()?;
DbEventData {
event_id: pdu.event_id.to_owned(),
event_sn,
room_id: pdu.room_id.clone(),
internal_metadata: None,
json_data: serde_json::to_value(&value)?,
format_version: None,
}
.save()?;
drop(event_guard);
pdu
};
if let Some(state_key) = &pdu.state_key {
let state_key_id = state::ensure_field_id(&pdu.event_ty.to_string().into(), state_key)?;
state.insert(state_key_id, (pdu.event_id.clone(), pdu.event_sn));
}
}
info!("going through send_join response auth_chain");
for result in send_join_body
.0
.auth_chain
.iter()
.map(|pdu| super::validate_and_add_event_id(pdu, &room_version, &pub_key_map))
{
let (event_id, value) = match result.await {
Ok(t) => t,
Err(_) => continue,
};
if !timeline::has_pdu(&event_id) {
let (event_sn, event_guard) = ensure_event_sn(room_id, &event_id)?;
NewDbEvent::from_canonical_json(&event_id, event_sn, &value, false)?.save()?;
DbEventData {
event_id: event_id.to_owned(),
event_sn,
room_id: room_id.to_owned(),
internal_metadata: None,
json_data: serde_json::to_value(&value)?,
format_version: None,
}
.save()?;
drop(event_guard);
}
}
info!("running send_join auth check");
// TODO: Authcheck
// if !event_auth::auth_check(
// &RoomVersion::new(&room_version_id)?,
// &parsed_join_pdu,
// None::<PduEvent>, // TODO: third party invite
// |k, s| {
// timeline::get_pdu(
// state.get(&state::ensure_field_id(&k.to_string().into(), s).ok()?)?,
// )
// .ok()?
// },
// )
// .map_err(|e| {
// warn!("Auth check failed when running send_json auth check: {e}");
// MatrixError::invalid_param("Auth check failed when running send_json auth check")
// })? {
// return Err(MatrixError::invalid_param("Auth check failed when running send_json auth check").into());
// }
// info!("saving state from send_join");
// let DeltaInfo {
// frame_id,
// appended,
// disposed,
// } = state::save_state(
// room_id,
// Arc::new(
// state
// .into_iter()
// .map(|(k, (_event_id, event_sn))| Ok(CompressedEvent::new(k, event_sn)))
// .collect::<AppResult<_>>()?,
// ),
// )?;
// state::force_state(room_id, frame_id, appended, disposed)?;
// info!("Updating joined counts for new room");
// room::update_joined_servers(room_id)?;
// room::update_currents(room_id)?;
let state_lock = room::lock_state(room_id).await;
info!("appending new room join event");
diesel::insert_into(events::table)
.values(NewDbEvent::from_canonical_json(
&event_id,
join_event_sn,
&join_event,
false,
)?)
.on_conflict_do_nothing()
.execute(&mut connect()?)?;
let join_pdu = SnPduEvent {
pdu: parsed_join_pdu,
event_sn: join_event_sn,
is_outlier: false,
soft_failed: false,
is_backfill: false,
};
timeline::append_pdu(&join_pdu, join_event, &state_lock).await?;
let frame_id_after_join = state::append_to_state(&join_pdu)?;
drop(event_guard);
info!("setting final room state for new room");
// We set the room state after inserting the pdu, so that we never have a moment in time
// where events in the current room state do not exist
state::set_room_state(room_id, frame_id_after_join)?;
drop(state_lock);
if let Some(device_id) = device_id
&& let Ok(room_server_id) = room_id.server_name()
{
let query = room_users::table
.filter(room_users::room_id.ne(room_id))
.filter(room_users::user_id.eq(sender_id))
.filter(room_users::room_server_id.eq(room_server_id));
if !diesel_exists!(query, &mut connect()?)? {
let content = DeviceListUpdateContent::new(
sender_id.to_owned(),
device_id.to_owned(),
data::next_sn()? as u64,
);
let edu = Edu::DeviceListUpdate(content);
send_edu_server(room_server_id, &edu)?;
}
}
Ok(JoinRoomResBody::new(room_id.to_owned()))
}
/// Finds the first local user in `room_id` who is allowed to invite
/// `invitee_id`, for use as the authorising user of a restricted join.
///
/// Only searched when the invitee is joined to at least one of the
/// `restriction_rooms`; otherwise (or when no local member may invite)
/// a not-found error is returned.
pub async fn get_first_user_can_issue_invite(
    room_id: &RoomId,
    invitee_id: &UserId,
    restriction_rooms: &[OwnedRoomId],
) -> AppResult<OwnedUserId> {
    let invitee_qualifies = restriction_rooms
        .iter()
        .any(|rid| room::user::is_joined(invitee_id, rid).unwrap_or(false));
    if invitee_qualifies {
        for candidate in room::joined_users(room_id, None)? {
            // Only a user on this homeserver can authorise on our behalf.
            let is_local = candidate.server_name() == config::get().server_name;
            if is_local && room::user_can_invite(room_id, &candidate, invitee_id).await {
                return Ok(candidate);
            }
        }
    }
    Err(MatrixError::not_found("no user can issue invite in this room").into())
}
/// Collects every local user in `room_id` who is allowed to invite
/// `invitee_id`.
///
/// Returns an empty list when the invitee is not joined to any of the
/// `restriction_rooms` (so no authorisation is possible) or when no local
/// member has invite power.
pub async fn get_users_can_issue_invite(
    room_id: &RoomId,
    invitee_id: &UserId,
    restriction_rooms: &[OwnedRoomId],
) -> AppResult<Vec<OwnedUserId>> {
    let invitee_qualifies = restriction_rooms
        .iter()
        .any(|rid| room::user::is_joined(invitee_id, rid).unwrap_or(false));
    if !invitee_qualifies {
        return Ok(vec![]);
    }
    let mut inviters = vec![];
    for candidate in room::joined_users(room_id, None)? {
        // Only local users can authorise invites on our behalf.
        if candidate.server_name() == config::get().server_name
            && room::user_can_invite(room_id, &candidate, invitee_id).await
        {
            inviters.push(candidate);
        }
    }
    Ok(inviters)
}
/// Asks each candidate remote server for a `make_join` template until one
/// succeeds, returning the response together with the server that supplied
/// it. Our own server is skipped; the last failure is returned when every
/// candidate fails.
async fn make_join_request(
    user_id: &UserId,
    room_id: &RoomId,
    servers: &[OwnedServerName],
) -> AppResult<(MakeJoinResBody, OwnedServerName)> {
    // Holds either the first success or the most recent failure.
    let mut outcome: AppResult<(MakeJoinResBody, OwnedServerName)> =
        Err(StatusError::bad_request()
            .brief("no server available to assist in joining")
            .into());
    for remote_server in servers {
        // Asking ourselves over federation would be pointless.
        if remote_server == &config::get().server_name {
            continue;
        }
        info!("asking {remote_server} for make_join");
        let request = crate::core::federation::membership::make_join_request(
            &remote_server.origin().await,
            MakeJoinReqArgs {
                room_id: room_id.to_owned(),
                user_id: user_id.to_owned(),
                ver: config::supported_room_versions(),
            },
        )?
        .into_inner();
        match crate::sending::send_federation_request(remote_server, request, None).await {
            Ok(response) => {
                outcome = response
                    .json::<MakeJoinResBody>()
                    .await
                    .map(|body| (body, remote_server.clone()))
                    .map_err(Into::into);
            }
            Err(e) => {
                tracing::error!("make_join_request failed: {e:?}");
                outcome = Err(e);
            }
        }
        if outcome.is_ok() {
            break;
        }
    }
    outcome
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/membership/invite.rs | crates/server/src/membership/invite.rs | use crate::core::events::TimelineEventType;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::federation::membership::InviteUserResBodyV2;
use crate::core::identifiers::*;
use crate::core::serde::to_raw_json_value;
use crate::event::{PduBuilder, gen_event_id_canonical_json, handler};
use crate::membership::federation::membership::{InviteUserReqArgs, InviteUserReqBodyV2};
use crate::room::{state, timeline};
use crate::{AppResult, GetUrlOrigin, IsRemoteOrLocal, MatrixError, data, room, sending};
/// Invites `invitee_id` into `room_id` on behalf of `inviter_id`.
///
/// The inviter must be joined to the room and pass the room's invite
/// permission check. For a remote invitee, the invite PDU is built and
/// signed locally, the pending membership is recorded, the event is sent to
/// the invitee's server via the v2 federation invite endpoint, and the event
/// the remote server returns is validated (its event id must be unchanged)
/// and run through the incoming-PDU pipeline before being broadcast. For a
/// local invitee the membership event is simply appended to the timeline.
pub async fn invite_user(
    inviter_id: &UserId,
    invitee_id: &UserId,
    room_id: &RoomId,
    reason: Option<String>,
    is_direct: bool,
) -> AppResult<()> {
    if !room::user::is_joined(inviter_id, room_id)? {
        return Err(MatrixError::forbidden(
            "you must be joined in the room you are trying to invite from",
            None,
        )
        .into());
    }
    if !room::user_can_invite(room_id, inviter_id, invitee_id).await {
        return Err(MatrixError::forbidden("you are not allowed to invite this user", None).into());
    }
    let conf = crate::config::get();
    if invitee_id.server_name().is_remote() {
        // Build and sign the invite PDU while holding the room state lock,
        // then collect the stripped state the invitee's server will need.
        let (pdu, pdu_json, invite_room_state) = {
            let content = RoomMemberEventContent {
                avatar_url: None,
                display_name: None,
                is_direct: Some(is_direct),
                membership: MembershipState::Invite,
                third_party_invite: None,
                blurhash: None,
                reason,
                join_authorized_via_users_server: None,
                extra_data: Default::default(),
            };
            let state_lock = crate::room::lock_state(room_id).await;
            let (pdu, pdu_json, _event_guard) = PduBuilder::state(invitee_id.to_string(), &content)
                .hash_sign_save(
                    inviter_id,
                    room_id,
                    crate::room::get_version(room_id)
                        .as_ref()
                        .unwrap_or(&conf.default_room_version),
                    &state_lock,
                )
                .await?;
            drop(state_lock);
            let invite_room_state = state::summary_stripped(&pdu)?;
            (pdu, pdu_json, invite_room_state)
        };
        let room_version_id = room::get_version(room_id)?;
        // Record the pending invite membership locally before contacting the
        // remote server.
        crate::membership::update_membership(
            &pdu.event_id,
            pdu.event_sn,
            room_id,
            invitee_id,
            MembershipState::Invite,
            inviter_id,
            Some(invite_room_state.clone()),
        )?;
        let invite_request = crate::core::federation::membership::invite_user_request_v2(
            &invitee_id.server_name().origin().await,
            InviteUserReqArgs {
                room_id: room_id.to_owned(),
                event_id: (*pdu.event_id).to_owned(),
            },
            InviteUserReqBodyV2 {
                room_version: room_version_id.clone(),
                event: sending::convert_to_outgoing_federation_event(pdu_json.clone()),
                invite_room_state,
                via: state::servers_route_via(room_id).ok(),
            },
        )?
        .into_inner();
        let send_join_response =
            sending::send_federation_request(invitee_id.server_name(), invite_request, None)
                .await?
                .json::<InviteUserResBodyV2>()
                .await?;
        // We do not add the event_id field to the pdu here because of signature and hashes checks
        let (event_id, value) =
            gen_event_id_canonical_json(&send_join_response.event, &room_version_id).map_err(
                |e| {
                    tracing::error!("could not convert event to canonical json: {e}");
                    MatrixError::invalid_param("could not convert event to canonical json")
                },
            )?;
        // The remote server may re-sign the event but must not change its
        // content, so the event id it hashes to must match ours.
        if *pdu.event_id != *event_id {
            warn!(
                "server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}",
                invitee_id.server_name(),
                pdu_json,
                value
            );
            return Err(MatrixError::bad_json(format!(
                "server `{}` sent event with wrong event id",
                invitee_id.server_name()
            ))
            .into());
        }
        let origin: OwnedServerName = serde_json::from_value(
            serde_json::to_value(
                value
                    .get("origin")
                    .ok_or(MatrixError::bad_json("event needs an origin field"))?,
            )
            .expect("CanonicalJson is valid json value"),
        )
        .map_err(|e| {
            MatrixError::bad_json(format!(
                "origin field in event is not a valid server name: {e}"
            ))
        })?;
        // Feed the (re-signed) event through the normal incoming pipeline so
        // it is persisted with full auth/state handling.
        handler::process_incoming_pdu(
            &origin,
            &event_id,
            room_id,
            &room_version_id,
            value,
            true,
            false,
        )
        .await?;
        return sending::send_pdu_room(
            room_id,
            &event_id,
            &[invitee_id.server_name().to_owned()],
            &[],
        );
    }
    // Local invitee: just append the membership event to the room timeline.
    timeline::build_and_append_pdu(
        PduBuilder {
            event_type: TimelineEventType::RoomMember,
            content: to_raw_json_value(&RoomMemberEventContent {
                membership: MembershipState::Invite,
                display_name: data::user::display_name(invitee_id)?,
                avatar_url: data::user::avatar_url(invitee_id)?,
                is_direct: Some(is_direct),
                third_party_invite: None,
                blurhash: data::user::blurhash(invitee_id)?,
                reason,
                join_authorized_via_users_server: None,
                extra_data: Default::default(),
            })
            .expect("event is valid, we just created it"),
            state_key: Some(invitee_id.to_string()),
            ..Default::default()
        },
        inviter_id,
        room_id,
        &crate::room::get_version(room_id)?,
        &room::lock_state(room_id).await,
    )
    .await?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/membership/knock.rs | crates/server/src/membership/knock.rs |
use salvo::http::StatusError;
use crate::core::UnixMillis;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::federation::knock::{
MakeKnockReqArgs, MakeKnockResBody, SendKnockReqArgs, SendKnockReqBody, SendKnockResBody,
send_knock_request,
};
use crate::core::identifiers::*;
use crate::core::room::JoinRule;
use crate::core::serde::{CanonicalJsonObject, CanonicalJsonValue, to_canonical_value};
use crate::data::room::NewDbEvent;
use crate::event::{PduBuilder, PduEvent, ensure_event_sn, gen_event_id};
use crate::room::timeline;
use crate::{
AppError, AppResult, GetUrlOrigin, IsRemoteOrLocal, MatrixError, SnPduEvent, config, room,
sending,
};
/// Knocks on `room_id` as `sender_id`, locally when this server already
/// participates in the room and over federation otherwise.
///
/// Returns `Ok(None)` when the user already has a pending knock; errors when
/// the user is invited, joined, or banned. The local path requires a room
/// version that supports knocking and a knock-capable join rule. The
/// federation path asks one of `servers` for a `make_knock` template, signs
/// and persists the resulting event, then confirms it via `send_knock`.
pub async fn knock_room(
    sender_id: &UserId,
    room_id: &RoomId,
    reason: Option<String>,
    servers: &[OwnedServerName],
) -> AppResult<Option<SnPduEvent>> {
    // Knocking makes no sense (and is forbidden) for users already invited,
    // joined, or banned; an existing knock is treated as a no-op.
    if room::user::is_invited(sender_id, room_id)? {
        warn!("{sender_id} is already invited in {room_id} but attempted to knock");
        return Err(MatrixError::forbidden(
            "you cannot knock on a room you are already invited/accepted to.",
            None,
        )
        .into());
    }
    if room::user::is_joined(sender_id, room_id)? {
        warn!("{sender_id} is already joined in {room_id} but attempted to knock");
        return Err(MatrixError::forbidden(
            "you cannot knock on a room you are already joined in.",
            None,
        )
        .into());
    }
    if room::user::is_knocked(sender_id, room_id)? {
        warn!("{sender_id} is already knocked in {room_id}");
        return Ok(None);
    }
    // NOTE(review): local binding is misspelled ("memeber" -> "member");
    // rename when convenient.
    if let Ok(memeber) = room::get_member(room_id, sender_id, None)
        && memeber.membership == MembershipState::Ban
    {
        warn!("{sender_id} is banned from {room_id} but attempted to knock");
        return Err(MatrixError::forbidden(
            "you cannot knock on a room you are banned from.",
            None,
        )
        .into());
    }
    let conf = config::get();
    if room::is_server_joined(&conf.server_name, room_id).unwrap_or(false) {
        use RoomVersionId::*;
        info!("we can knock locally");
        // Knocking only exists from room version 7 onward.
        let room_version = room::get_version(room_id)?;
        if matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6) {
            return Err(MatrixError::forbidden(
                "this room version does not support knocking",
                None,
            )
            .into());
        }
        let join_rule = room::get_join_rule(room_id)?;
        if !matches!(
            join_rule,
            JoinRule::Invite | JoinRule::Knock | JoinRule::KnockRestricted(..)
        ) {
            return Err(MatrixError::forbidden("this room does not support knocking", None).into());
        }
        let content = RoomMemberEventContent {
            display_name: crate::data::user::display_name(sender_id).ok().flatten(),
            avatar_url: crate::data::user::avatar_url(sender_id).ok().flatten(),
            blurhash: crate::data::user::blurhash(sender_id).ok().flatten(),
            reason: reason.clone(),
            ..RoomMemberEventContent::new(MembershipState::Knock)
        };
        // Try normal knock first
        match timeline::build_and_append_pdu(
            PduBuilder::state(sender_id.to_string(), &content),
            sender_id,
            room_id,
            &crate::room::get_version(room_id)?,
            &room::lock_state(room_id).await,
        )
        .await
        {
            Ok(pdu) => {
                if let Err(e) = sending::send_pdu_room(
                    room_id,
                    &pdu.event_id,
                    &[sender_id.server_name().to_owned()],
                    &[],
                ) {
                    error!("failed to notify banned user server: {e}");
                }
                return Ok(Some(pdu));
            }
            Err(e) => {
                // Local knock failed; fall through to federation unless there
                // is no remote server that could help.
                tracing::error!("failed to knock room {room_id} with conflict error: {e}");
                if servers.is_empty() || servers.iter().all(|s| s.is_local()) {
                    return Err(e);
                }
            }
        }
    }
    info!("knocking {room_id} over federation");
    let (make_knock_response, remote_server) =
        make_knock_request(sender_id, room_id, servers).await?;
    info!("make_knock finished");
    let room_version = make_knock_response.room_version;
    if !config::supports_room_version(&room_version) {
        return Err(StatusError::internal_server_error()
            .brief("remote room version {room_version} is not supported by palpo")
            .into());
    }
    crate::room::ensure_room(room_id, &room_version)?;
    let mut knock_event_stub: CanonicalJsonObject =
        serde_json::from_str(make_knock_response.event.get()).map_err(|e| {
            StatusError::internal_server_error().brief(format!(
                "invalid make_knock event json received from server: {e:?}"
            ))
        })?;
    // knock_event_stub.insert(
    //     "origin".to_owned(),
    //     CanonicalJsonValue::String(conf.server_name.as_str().to_owned()),
    // );
    // Fill in our own member content over the remote template.
    knock_event_stub.insert(
        "content".to_owned(),
        to_canonical_value(RoomMemberEventContent {
            display_name: crate::data::user::display_name(sender_id).ok().flatten(),
            avatar_url: crate::data::user::avatar_url(sender_id).ok().flatten(),
            blurhash: crate::data::user::blurhash(sender_id).ok().flatten(),
            reason,
            ..RoomMemberEventContent::new(MembershipState::Knock)
        })
        .expect("event is valid, we just created it"),
    );
    // In order to create a compatible ref hash (EventID) the `hashes` field needs
    // to be present
    crate::server_key::hash_and_sign_event(&mut knock_event_stub, &room_version)?;
    // Generate event id
    let event_id = gen_event_id(&knock_event_stub, &room_version)?;
    // Add event_id
    knock_event_stub.insert(
        "event_id".to_owned(),
        CanonicalJsonValue::String(event_id.clone().into()),
    );
    // It has enough fields to be called a proper event now
    let knock_event = knock_event_stub;
    info!("asking {remote_server} for send_knock in room {room_id}");
    let send_knock_request = send_knock_request(
        &remote_server.origin().await,
        SendKnockReqArgs {
            room_id: room_id.to_owned(),
            event_id: event_id.to_owned(),
        },
        SendKnockReqBody::new(crate::sending::convert_to_outgoing_federation_event(
            knock_event.clone(),
        )),
    )?
    .into_inner();
    let _send_knock_body =
        crate::sending::send_federation_request(&remote_server, send_knock_request, None)
            .await?
            .json::<SendKnockResBody>()
            .await?;
    info!("send knock finished");
    info!("parsing knock event");
    let parsed_knock_pdu = PduEvent::from_canonical_object(room_id, &event_id, knock_event.clone())
        .map_err(|e| {
            StatusError::internal_server_error().brief(format!("invalid knock event PDU: {e:?}"))
        })?;
    info!("going through send_knock response knock state events");
    info!("appending room knock event locally");
    // Persist the knock event; it is saved as an outlier since we do not
    // (yet) hold the room's full DAG.
    let event_id = parsed_knock_pdu.event_id.clone();
    let (event_sn, event_guard) = ensure_event_sn(room_id, &event_id)?;
    NewDbEvent {
        id: event_id.to_owned(),
        sn: event_sn,
        ty: MembershipState::Knock.to_string(),
        room_id: room_id.to_owned(),
        unrecognized_keys: None,
        depth: parsed_knock_pdu.depth as i64,
        topological_ordering: parsed_knock_pdu.depth as i64,
        stream_ordering: event_sn,
        origin_server_ts: UnixMillis::now(),
        received_at: None,
        sender_id: Some(sender_id.to_owned()),
        contains_url: false,
        worker_id: None,
        state_key: Some(sender_id.to_string()),
        is_outlier: true,
        soft_failed: false,
        is_rejected: false,
        rejection_reason: None,
    }
    .save()?;
    let knock_pdu = SnPduEvent {
        pdu: parsed_knock_pdu,
        event_sn,
        is_outlier: false,
        soft_failed: false,
        is_backfill: false,
    };
    timeline::append_pdu(&knock_pdu, knock_event, &room::lock_state(room_id).await).await?;
    drop(event_guard);
    Ok(Some(knock_pdu))
}
/// Asks each candidate remote server for a `make_knock` template until one
/// succeeds, returning the response together with the server that produced
/// it. Local servers are skipped; after 50 failed attempts we give up.
///
/// Fixes: the federation request previously used `?`, so the first network
/// failure aborted the whole loop instead of trying the next server, and the
/// bail-out threshold (`> 40`) contradicted the "50 servers failed" log
/// message.
async fn make_knock_request(
    sender_id: &UserId,
    room_id: &RoomId,
    servers: &[OwnedServerName],
) -> AppResult<(MakeKnockResBody, OwnedServerName)> {
    // Holds either the first success or the most recent failure.
    let mut make_knock_response_and_server = Err(AppError::HttpStatus(
        StatusError::internal_server_error().brief("no server available to assist in knocking"),
    ));
    let mut make_knock_counter: usize = 0;
    for remote_server in servers {
        if remote_server.is_local() {
            continue;
        }
        info!("asking {remote_server} for make_knock ({make_knock_counter})");
        let request = crate::core::federation::knock::make_knock_request(
            &remote_server.origin().await,
            MakeKnockReqArgs {
                room_id: room_id.to_owned(),
                user_id: sender_id.to_owned(),
                ver: config::supported_room_versions(),
            },
        )?
        .into_inner();
        // A failure for one server must not abort the loop; record the error
        // and move on to the next candidate.
        let make_knock_response =
            match crate::sending::send_federation_request(remote_server, request, None).await {
                Ok(response) => response
                    .json::<MakeKnockResBody>()
                    .await
                    .map_err(Into::into),
                Err(e) => Err(e),
            };
        trace!("Make knock response: {make_knock_response:?}");
        make_knock_counter = make_knock_counter.saturating_add(1);
        make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone()));
        if make_knock_response_and_server.is_ok() {
            break;
        }
        if make_knock_counter >= 50 {
            warn!(
                "50 servers failed to provide valid make_knock response, assuming no server can \
                assist in knocking."
            );
            make_knock_response_and_server = Err(StatusError::internal_server_error()
                .brief("no server available to assist in knocking")
                .into());
            return make_knock_response_and_server;
        }
    }
    make_knock_response_and_server
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/membership/leave.rs | crates/server/src/membership/leave.rs |
use std::collections::HashSet;
use salvo::http::StatusError;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::events::{StateEventType, TimelineEventType};
use crate::core::federation::membership::{MakeLeaveResBody, SendLeaveReqBody, make_leave_request};
use crate::core::identifiers::*;
use crate::core::serde::{CanonicalJsonObject, CanonicalJsonValue, to_raw_json_value};
use crate::core::{Seqnum, UnixMillis};
use crate::data::room::{DbEventData, NewDbEvent};
use crate::event::{PduBuilder, ensure_event_sn};
use crate::membership::federation::membership::{SendLeaveReqArgsV2, send_leave_request_v2};
use crate::room::{self, state, timeline};
use crate::{
AppError, AppResult, GetUrlOrigin, MatrixError, PduEvent, SnPduEvent, config, data, membership,
};
/// Removes `user_id` from every room they are currently joined to or
/// invited to. Failures are ignored per room, so one problematic room
/// cannot block cleanup of the rest.
pub async fn leave_all_rooms(user_id: &UserId) -> AppResult<()> {
    let joined = data::user::joined_rooms(user_id)?;
    let invited = data::user::invited_rooms(user_id, 0)?;

    let mut all_room_ids = Vec::new();
    all_room_ids.extend(joined);
    all_room_ids.extend(invited.into_iter().map(|t| t.0));

    for room_id in &all_room_ids {
        // Best effort: ignore per-room failures.
        let _ = leave_room(user_id, room_id, None).await;
    }
    Ok(())
}
/// Makes `user_id` leave `room_id`, preferring a local leave and falling
/// back to leaving over federation.
///
/// If this server participates in the room we first try to append a local
/// leave event; on success we are done. Otherwise (or if the local attempt
/// fails) a remote server is asked to assist. If the remote leave also
/// fails and no other server serves the room, a purely local leave is
/// performed as the last resort.
///
/// Fixes: the warn! messages said "failed to leave room {}" but
/// interpolated the user id, producing misleading logs; they now report
/// both the user and the room.
pub async fn leave_room(
    user_id: &UserId,
    room_id: &RoomId,
    reason: Option<String>,
) -> AppResult<()> {
    let conf = config::get();
    if room::is_server_joined(&conf.server_name, room_id)? {
        // We participate in the room, so try to leave locally first.
        if let Err(e) = leave_room_local(user_id, room_id, reason.clone()).await {
            warn!("user {user_id} failed to leave room {room_id} locally: {e}");
        } else {
            return Ok(());
        }
    }
    match leave_room_remote(user_id, room_id).await {
        Ok((event_id, event_sn)) => {
            let last_state = state::get_user_state(user_id, room_id)?;
            // We always drop the invite, we can't rely on other servers
            membership::update_membership(
                &event_id,
                event_sn,
                room_id,
                user_id,
                MembershipState::Leave,
                user_id,
                last_state,
            )?;
            Ok(())
        }
        Err(e) => {
            warn!("user {user_id} failed to leave room {room_id} remotely: {e}");
            if !room::has_any_other_server(room_id, &conf.server_name)? {
                // Nobody else serves this room; a local leave is sufficient.
                leave_room_local(user_id, room_id, reason).await?;
                Ok(())
            } else {
                Err(e)
            }
        }
    }
}
/// Appends a local leave event for `user_id` in `room_id` and returns the
/// new event's id and sequence number.
///
/// Loads the user's current `m.room.member` state, switches the membership
/// to `Leave` (attaching `reason` and clearing
/// `join_authorized_via_users_server`), and appends the resulting PDU. If
/// the user was only invited, and by a remote user, the leave is pushed to
/// that inviter's server so it learns of the rejection; otherwise the event
/// is sent to the room's servers as usual.
async fn leave_room_local(
    user_id: &UserId,
    room_id: &RoomId,
    reason: Option<String>,
) -> AppResult<(OwnedEventId, Seqnum)> {
    let member_event =
        room::get_state(room_id, &StateEventType::RoomMember, user_id.as_str(), None)?;
    let mut event_content = member_event
        .get_content::<RoomMemberEventContent>()
        .map_err(|_| AppError::public("invalid member event in database"))?;
    // Remember whether this is an invite rejection before mutating the content.
    let just_invited = event_content.membership == MembershipState::Invite;
    event_content.membership = MembershipState::Leave;
    event_content.reason = reason;
    event_content.join_authorized_via_users_server = None;
    let pdu = timeline::build_and_append_pdu(
        PduBuilder {
            event_type: TimelineEventType::RoomMember,
            content: to_raw_json_value(&event_content).expect("event is valid, we just created it"),
            state_key: Some(user_id.to_string()),
            ..Default::default()
        },
        user_id,
        room_id,
        &crate::room::get_version(room_id)?,
        &room::lock_state(room_id).await,
    )
    .await?;
    // A rejected remote invite must be told to the inviter's server explicitly.
    if just_invited && member_event.sender.server_name() != config::server_name() {
        let _ = crate::sending::send_pdu_room(
            room_id,
            &pdu.event_id,
            &[member_event.sender.server_name().to_owned()],
            &[],
        );
    } else {
        let _ = crate::sending::send_pdu_room(room_id, &pdu.event_id, &[], &[]);
    }
    Ok((pdu.event_id.clone(), pdu.event_sn))
}
async fn leave_room_remote(
user_id: &UserId,
room_id: &RoomId,
) -> AppResult<(OwnedEventId, Seqnum)> {
let mut make_leave_response_and_server =
Err(AppError::public("no server available to assist in leaving"));
let invite_state = state::get_user_state(user_id, room_id)?
.ok_or(MatrixError::bad_state("user is not invited"))?;
let servers: HashSet<_> = invite_state
.iter()
.filter_map(|event| serde_json::from_str(event.as_str()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
.filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned())
.collect();
for remote_server in servers {
let request = make_leave_request(
&room_id
.server_name()
.map_err(AppError::internal)?
.origin()
.await,
room_id,
user_id,
)?
.into_inner();
let make_leave_response = crate::sending::send_federation_request(
room_id.server_name().map_err(AppError::internal)?,
request,
None,
)
.await?
.json::<MakeLeaveResBody>()
.await;
make_leave_response_and_server = make_leave_response
.map(|r| (r, remote_server))
.map_err(Into::into);
if make_leave_response_and_server.is_ok() {
break;
}
}
let (make_leave_response, remote_server) = make_leave_response_and_server?;
let room_version_id = match make_leave_response.room_version {
Some(version) if config::supported_room_versions().contains(&version) => version,
_ => return Err(AppError::public("room version is not supported")),
};
let mut leave_event_stub =
serde_json::from_str::<CanonicalJsonObject>(make_leave_response.event.get())
.map_err(|_| AppError::public("invalid make_leave event json received from server"))?;
// TODO: Is origin needed?
leave_event_stub.insert(
"origin".to_owned(),
CanonicalJsonValue::String(config::get().server_name.as_str().to_owned()),
);
if !leave_event_stub.contains_key("origin_server_ts") {
leave_event_stub.insert(
"origin_server_ts".to_owned(),
CanonicalJsonValue::Integer(UnixMillis::now().get() as i64),
);
}
// We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms
leave_event_stub.remove("event_id");
// In order to create a compatible ref hash (EventID) the `hashes` field needs to be present
crate::server_key::hash_and_sign_event(&mut leave_event_stub, &room_version_id)
.expect("event is valid, we just created it");
// Generate event id
let event_id = crate::event::gen_event_id(&leave_event_stub, &room_version_id)?;
// Add event_id back
leave_event_stub.insert(
"event_id".to_owned(),
CanonicalJsonValue::String(event_id.as_str().to_owned()),
);
let (event_sn, event_guard) = ensure_event_sn(room_id, &event_id)?;
NewDbEvent {
id: event_id.to_owned(),
sn: event_sn,
ty: MembershipState::Leave.to_string(),
room_id: room_id.to_owned(),
unrecognized_keys: None,
depth: 0,
topological_ordering: 0,
stream_ordering: event_sn,
origin_server_ts: UnixMillis::now(),
received_at: None,
sender_id: Some(user_id.to_owned()),
contains_url: false,
worker_id: None,
state_key: Some(user_id.to_string()),
is_outlier: false,
soft_failed: false,
is_rejected: false,
rejection_reason: None,
}
.save()?;
DbEventData {
event_id: event_id.clone(),
event_sn,
room_id: room_id.to_owned(),
internal_metadata: None,
json_data: serde_json::to_value(&leave_event_stub)?,
format_version: None,
}
.save()?;
let parsed_leave_pdu =
PduEvent::from_canonical_object(room_id, &event_id, leave_event_stub.clone()).map_err(
|e| {
StatusError::internal_server_error()
.brief(format!("invalid leave event PDU: {e:?}"))
},
)?;
let leave_pdu = SnPduEvent {
pdu: parsed_leave_pdu,
event_sn,
is_outlier: false,
soft_failed: false,
is_backfill: false,
};
timeline::append_pdu(
&leave_pdu,
leave_event_stub.clone(),
&room::lock_state(room_id).await,
)
.await?;
drop(event_guard);
// It has enough fields to be called a proper event now
let leave_event = leave_event_stub;
let request = send_leave_request_v2(
&remote_server.origin().await,
SendLeaveReqArgsV2 {
room_id: room_id.to_owned(),
event_id: event_id.clone(),
},
SendLeaveReqBody(crate::sending::convert_to_outgoing_federation_event(
leave_event.clone(),
)),
)?
.into_inner();
crate::sending::send_federation_request(&remote_server, request, None).await?;
Ok((event_id, event_sn))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/hoops/auth.rs | crates/server/src/hoops/auth.rs | use std::{collections::BTreeMap, iter::FromIterator};
use diesel::prelude::*;
use salvo::http::headers::{HeaderMapExt, authorization::Authorization};
use salvo::prelude::*;
use crate::core::federation::authentication::XMatrix;
use crate::core::serde::CanonicalJsonValue;
use crate::core::signatures;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::{DbAccessToken, DbUser, DbUserDevice};
use crate::exts::DepotExt;
use crate::server_key::{PubKeyMap, PubKeys};
use crate::{AppResult, AuthArgs, AuthedInfo, MatrixError, config};
/// Handler that accepts either a client `Bearer` access token or an
/// `X-Matrix` server signature as authentication.
///
/// A `Bearer `-prefixed Authorization header is treated as a client token;
/// any other Authorization value is verified as federation signatures. A
/// missing header is rejected outright.
#[handler]
pub async fn auth_by_access_token_or_signatures(
    aa: AuthArgs,
    req: &mut Request,
    depot: &mut Depot,
) -> AppResult<()> {
    let is_bearer = match &aa.authorization {
        Some(value) => value.starts_with("Bearer "),
        None => return Err(MatrixError::missing_token("Missing token.").into()),
    };
    if is_bearer {
        auth_by_access_token_inner(aa, depot).await
    } else {
        auth_by_signatures_inner(req, depot).await
    }
}
/// Handler that authenticates the request strictly via a client access token
/// (or an appservice `as_token`); see [`auth_by_access_token_inner`].
#[handler]
pub async fn auth_by_access_token(aa: AuthArgs, depot: &mut Depot) -> AppResult<()> {
    auth_by_access_token_inner(aa, depot).await
}
/// Handler that authenticates a federation request solely via its `X-Matrix`
/// signature header; see [`auth_by_signatures_inner`].
#[handler]
pub async fn auth_by_signatures(
    _aa: AuthArgs,
    req: &mut Request,
    depot: &mut Depot,
) -> AppResult<()> {
    auth_by_signatures_inner(req, depot).await
}
async fn auth_by_access_token_inner(aa: AuthArgs, depot: &mut Depot) -> AppResult<()> {
let token = aa.require_access_token()?;
let access_token = user_access_tokens::table
.filter(user_access_tokens::token.eq(token))
.first::<DbAccessToken>(&mut connect()?)
.ok();
if let Some(access_token) = access_token {
let user = users::table
.find(&access_token.user_id)
.first::<DbUser>(&mut connect()?)
.map_err(|_| MatrixError::unknown_token("User not found", true))?;
let user_device = user_devices::table
.filter(user_devices::device_id.eq(&access_token.device_id))
.filter(user_devices::user_id.eq(&user.id))
.first::<DbUserDevice>(&mut connect()?)
.map_err(|_| MatrixError::unknown_token("User device not found", true))?;
depot.inject(AuthedInfo {
user,
user_device,
access_token_id: Some(access_token.id),
appservice: None,
});
Ok(())
} else {
let appservices = crate::appservices();
for appservice in appservices {
if appservice.as_token == token {
let user = users::table
.filter(users::appservice_id.eq(&appservice.id))
.first::<DbUser>(&mut connect()?)?;
let user_device = user_devices::table
.filter(user_devices::user_id.eq(&user.id))
.first::<DbUserDevice>(&mut connect()?)?;
depot.inject(AuthedInfo {
user,
user_device,
access_token_id: None,
appservice: Some(appservice.to_owned().try_into()?),
});
return Ok(());
}
}
Err(MatrixError::unknown_token("Unknown access token.", true).into())
}
}
/// Verifies a federation request's `X-Matrix` Authorization header.
///
/// Rebuilds the canonical JSON request object the origin server signed —
/// destination, method, origin, uri, signatures, and (when present) the JSON
/// body as `content` — fetches the origin's public verify key, and checks
/// the signature. On success the verified origin is stored in the depot for
/// downstream handlers; any failure is rejected as forbidden.
async fn auth_by_signatures_inner(req: &mut Request, depot: &mut Depot) -> AppResult<()> {
    let Some(Authorization(x_matrix)) = req.headers().typed_get::<Authorization<XMatrix>>() else {
        warn!("Missing or invalid Authorization header");
        return Err(MatrixError::forbidden("Missing or invalid authorization header", None).into());
    };
    // signatures: { origin: { key_id: signature } }, as in a signed event.
    let origin_signatures = BTreeMap::from_iter([(
        x_matrix.key.as_str().to_owned(),
        CanonicalJsonValue::String(x_matrix.sig.to_string()),
    )]);
    let origin = &x_matrix.origin;
    let signatures = BTreeMap::from_iter([(
        origin.as_str().to_owned(),
        CanonicalJsonValue::Object(origin_signatures),
    )]);
    // Reconstruct the exact JSON object the origin server signed.
    let mut authorization = BTreeMap::from_iter([
        (
            "destination".to_owned(),
            CanonicalJsonValue::String(config::get().server_name.as_str().to_owned()),
        ),
        (
            "method".to_owned(),
            CanonicalJsonValue::String(req.method().to_string()),
        ),
        (
            "origin".to_owned(),
            CanonicalJsonValue::String(origin.as_str().to_owned()),
        ),
        (
            "uri".to_owned(),
            format!(
                "{}{}",
                req.uri().path(),
                req.uri()
                    .query()
                    .map(|q| format!("?{q}"))
                    .unwrap_or_default()
            )
            .into(),
        ),
        (
            "signatures".to_owned(),
            CanonicalJsonValue::Object(signatures),
        ),
    ]);
    // The body is only part of the signed object when it is valid JSON.
    let json_body = req
        .payload()
        .await
        .ok()
        .and_then(|payload| serde_json::from_slice::<CanonicalJsonValue>(payload).ok());
    if let Some(json_body) = &json_body {
        authorization.insert("content".to_owned(), json_body.clone());
    };
    let key = crate::server_key::get_verify_key(origin, &x_matrix.key).await?;
    let keys: PubKeys = [(x_matrix.key.to_string(), key.key)].into();
    let keys: PubKeyMap = [(origin.as_str().into(), keys)].into();
    if let Err(e) = signatures::verify_json(&keys, &authorization) {
        warn!(
            "Failed to verify json request from {}: {}\n{:?}",
            x_matrix.origin, e, authorization
        );
        if req.uri().to_string().contains('@') {
            warn!(
                "Request uri contained '@' character. Make sure your \
                reverse proxy gives Palpo the raw uri (apache: use \
                nocanon)"
            );
        }
        Err(MatrixError::forbidden("Failed to verify X-Matrix signatures.", None).into())
    } else {
        depot.set_origin(origin.to_owned());
        Ok(())
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/user.rs | crates/server/src/admin/user.rs | mod cmd;
use cmd::*;
use clap::Subcommand;
use crate::AppResult;
use crate::core::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId};
use crate::macros::admin_command_dispatch;
// Admin-console commands for managing user accounts.
// NOTE: the `///` comments below double as clap-generated help text shown to
// admins; edit them with user-facing output in mind. `admin_command_dispatch`
// routes each variant to the matching snake_case handler in `cmd`.
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum UserCommand {
    /// - Create a new user
    #[clap(alias = "create")]
    CreateUser {
        /// Username of the new user
        username: String,
        /// Password of the new user, if unspecified one is generated
        password: Option<String>,
    },
    /// - Reset user password
    ResetPassword {
        /// Username of the user for whom the password should be reset
        username: String,
        /// New password for the user, if unspecified one is generated
        password: Option<String>,
    },
    /// - Deactivate a user
    ///
    /// User will be removed from all rooms by default.
    /// Use --no-leave-rooms to not leave all rooms by default.
    Deactivate {
        #[arg(short, long)]
        no_leave_rooms: bool,
        user_id: String,
    },
    /// - Deactivate a list of users
    ///
    /// Recommended to use in conjunction with list-local-users.
    ///
    /// Users will be removed from joined rooms by default.
    ///
    /// Can be overridden with --no-leave-rooms.
    ///
    /// Removing a mass amount of users from a room may cause a significant
    /// amount of leave events. The time to leave rooms may depend significantly
    /// on joined rooms and servers.
    ///
    /// This command needs a newline separated list of users provided in a
    /// Markdown code block below the command.
    DeactivateAll {
        #[arg(short, long)]
        /// Does not leave any rooms the user is in on deactivation
        no_leave_rooms: bool,
        #[arg(short, long)]
        /// Also deactivate admin accounts and will assume leave all rooms too
        force: bool,
    },
    /// - List local users in the database
    #[clap(alias = "list")]
    ListUsers,
    /// - Lists all the rooms (local and remote) that the specified user is
    ///   joined in
    ListJoinedRooms { user_id: String },
    /// - Manually join a local user to a room.
    ForceJoinRoom {
        user_id: String,
        room_id: OwnedRoomOrAliasId,
    },
    /// - Manually leave a local user from a room.
    ForceLeaveRoom {
        user_id: String,
        room_id: OwnedRoomOrAliasId,
    },
    /// - Forces the specified user to drop their power levels to the room
    ///   default, if their permissions allow and the auth check permits
    ForceDemote {
        user_id: String,
        room_id: OwnedRoomOrAliasId,
    },
    /// - Grant server-admin privileges to a user.
    MakeUserAdmin { user_id: String },
    /// - Puts a room tag for the specified user and room ID.
    ///
    /// This is primarily useful if you'd like to set your admin room
    /// to the special "System Alerts" section in Element as a way to
    /// permanently see your admin room without it being buried away in your
    /// favourites or rooms. To do this, you would pass your user, your admin
    /// room's internal ID, and the tag name `m.server_notice`.
    PutRoomTag {
        user_id: String,
        room_id: OwnedRoomId,
        tag: String,
    },
    /// - Deletes the room tag for the specified user and room ID
    DeleteRoomTag {
        user_id: String,
        room_id: OwnedRoomId,
        tag: String,
    },
    /// - Gets all the room tags for the specified user and room ID
    GetRoomTags {
        user_id: String,
        room_id: OwnedRoomId,
    },
    /// - Attempts to forcefully redact the specified event ID from the sender
    ///   user
    ///
    /// This is only valid for local users
    RedactEvent { event_id: OwnedEventId },
    /// - Force joins a specified list of local users to join the specified
    ///   room.
    ///
    /// Specify a codeblock of usernames.
    ///
    /// At least 1 server admin must be in the room to reduce abuse.
    ///
    /// Requires the `--yes-i-want-to-do-this` flag.
    ForceJoinListOfLocalUsers {
        room_id: OwnedRoomOrAliasId,
        #[arg(long)]
        yes_i_want_to_do_this: bool,
    },
    /// - Force joins all local users to the specified room.
    ///
    /// At least 1 server admin must be in the room to reduce abuse.
    ///
    /// Requires the `--yes-i-want-to-do-this` flag.
    ForceJoinAllLocalUsers {
        room_id: OwnedRoomOrAliasId,
        #[arg(long)]
        yes_i_want_to_do_this: bool,
    },
}
// ==== file: crates/server/src/admin/appservice.rs ====
use clap::Subcommand;
use crate::admin::Context;
use crate::core::appservice::Registration;
use crate::macros::admin_command_dispatch;
use crate::{AppError, AppResult};
// Admin-console commands for managing appservice (bridge) registrations.
// NOTE: the `///` comments below are rendered as clap help text for admins.
#[derive(Debug, Subcommand)]
#[admin_command_dispatch]
pub(crate) enum AppserviceCommand {
    /// - Register an appservice using its registration YAML
    ///
    /// This command needs a YAML generated by an appservice (such as a bridge),
    /// which must be provided in a Markdown code block below the command.
    ///
    /// Registering a new bridge using the ID of an existing bridge will replace
    /// the old one.
    Register,
    /// - Unregister an appservice using its ID
    ///
    /// You can find the ID using the `list-appservices` command.
    Unregister {
        /// The appservice to unregister
        appservice_identifier: String,
    },
    /// - Show an appservice's config using its ID
    ///
    /// You can find the ID using the `list-appservices` command.
    #[clap(alias("show"))]
    ShowAppserviceConfig {
        /// The appservice to show
        appservice_identifier: String,
    },
    /// - List all the currently registered appservices
    #[clap(alias("list"))]
    ListRegistered,
}
/// Register (or replace) an appservice from the YAML supplied in a fenced
/// Markdown code block in the command body.
///
/// Replies with the registered appservice ID on success; any parse or
/// registration failure is surfaced to the admin as a public error.
pub(super) async fn register(ctx: &Context<'_>) -> AppResult<()> {
    let body = &ctx.body;
    let body_len = ctx.body.len();
    // The registration YAML must arrive fenced: ``` on the first and last line.
    if body_len < 2
        || !body[0].trim().starts_with("```")
        || body.last().unwrap_or(&"").trim() != "```"
    {
        return Err(AppError::public(
            "Expected code block in command body. Add --help for details.",
        ));
    }
    // Everything between the fences is the YAML document.
    let range = 1..(body_len - 1);
    let appservice_config_body = body[range].join("\n");
    match serde_saphyr::from_str::<Registration>(&appservice_config_body) {
        Err(e) => Err(AppError::public(format!(
            "Could not parse appservice config as YAML: {e}"
        ))),
        Ok(registration) => {
            // Keep only the ID for the confirmation message instead of cloning
            // the entire Registration just to read it back afterwards.
            let id = registration.id.clone();
            if let Err(e) = crate::appservice::register_appservice(registration) {
                return Err(AppError::public(format!(
                    "Failed to register appservice: {e}"
                )));
            }
            write!(ctx, "Appservice registered with ID: {id}").await
        }
    }
}
/// Remove an appservice registration by its identifier and confirm to the
/// admin room; failures are reported as public errors.
pub(super) async fn unregister(ctx: &Context<'_>, appservice_identifier: String) -> AppResult<()> {
    if let Err(e) = crate::appservice::unregister_appservice(&appservice_identifier) {
        return Err(AppError::public(format!(
            "Failed to unregister appservice: {e}"
        )));
    }
    write!(ctx, "Appservice unregistered.").await
}
/// Render the stored registration of one appservice as a YAML code block.
///
/// Errors publicly when the identifier is unknown.
pub(super) async fn show_appservice_config(
    ctx: &Context<'_>,
    appservice_identifier: String,
) -> AppResult<()> {
    // Look up the stored registration; absence is reported to the admin.
    let Some(config) = crate::appservice::get_registration(&appservice_identifier)? else {
        return Err(AppError::public("Appservice does not exist."));
    };
    let config_str = serde_saphyr::to_string(&config)?;
    write!(
        ctx,
        "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```"
    )
    .await
}
/// Reply with the IDs of every registered appservice plus the total count.
pub(super) async fn list_registered(ctx: &Context<'_>) -> AppResult<()> {
    let appservices = crate::appservice::all()?;
    let len = appservices.len();
    let list: Vec<_> = appservices.keys().collect();
    write!(ctx, "Appservices ({len}): {list:?}").await?;
    Ok(())
}
// ==== file: crates/server/src/admin/media.rs ====
mod cmd;
use cmd::*;
use clap::Subcommand;
use crate::AppResult;
use crate::core::{OwnedEventId, OwnedMxcUri};
use crate::macros::admin_command_dispatch;
// Admin-console commands for inspecting and deleting stored media.
// NOTE: the `///` comments below are rendered as clap help text for admins;
// the commented-out variants are planned commands kept for reference.
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum MediaCommand {
    /// - Deletes a single media file from our database and on the filesystem
    ///   via a single MXC URL or event ID (not redacted)
    DeleteMedia {
        /// The MXC URL to delete
        #[arg(long)]
        mxc: Option<OwnedMxcUri>,
        /// - The message event ID which contains the media and thumbnail MXC
        ///   URLs
        #[arg(long)]
        event_id: Option<OwnedEventId>,
    },
    /// - Deletes a codeblock list of MXC URLs from our database and on the
    ///   filesystem. This will always ignore errors.
    DeleteMediaList,
    /// - Deletes all remote (and optionally local) media created before or
    ///   after [duration] time using filesystem metadata first created at date,
    ///   or fallback to last modified date. This will always ignore errors by
    ///   default.
    DeletePastRemoteMedia {
        /// - The relative time (e.g. 30s, 5m, 7d) within which to search
        duration: String,
        /// - Only delete media created before [duration] ago
        #[arg(long, short)]
        before: bool,
        /// - Only delete media created after [duration] ago
        #[arg(long, short)]
        after: bool,
        /// - Long argument to additionally delete local media
        #[arg(long)]
        yes_i_want_to_delete_local_media: bool,
    },
    // /// - Deletes all the local media from a local user on our server. This will
    // ///   always ignore errors by default.
    // DeleteAllFromUser { username: String },
    // /// - Deletes all remote media from the specified remote server. This will
    // ///   always ignore errors by default.
    // DeleteAllFromServer {
    //     server_name: OwnedServerName,
    //     /// Long argument to delete local media
    //     #[arg(long)]
    //     yes_i_want_to_delete_local_media: bool,
    // },
    GetFileInfo {
        /// The MXC URL to lookup info for.
        mxc: OwnedMxcUri,
    },
    // GetRemoteFile {
    //     /// The MXC URL to fetch
    //     mxc: OwnedMxcUri,
    //     #[arg(short, long)]
    //     server: Option<OwnedServerName>,
    //     #[arg(short, long, default_value("10000"))]
    //     timeout: u32,
    // },
    // GetRemoteThumbnail {
    //     /// The MXC URL to fetch
    //     mxc: OwnedMxcUri,
    //     #[arg(short, long)]
    //     server: Option<OwnedServerName>,
    //     #[arg(short, long, default_value("10000"))]
    //     timeout: u32,
    //     #[arg(short, long, default_value("800"))]
    //     width: u32,
    //     #[arg(short, long, default_value("800"))]
    //     height: u32,
    // },
}
// ==== file: crates/server/src/admin/processor.rs ====
use std::{
fmt::Write,
mem::take,
panic::AssertUnwindSafe,
sync::{Arc, Mutex},
time::SystemTime,
};
use clap::{CommandFactory, Parser};
use futures_util::{AsyncWriteExt, future::FutureExt, io::BufWriter};
use tracing::Level;
use tracing_subscriber::{EnvFilter, filter::LevelFilter};
use crate::admin::{
AdminCommand, CommandInput, CommandOutput, Context, ProcessorFuture, ProcessorResult,
};
use crate::core::{
EventId,
events::{
relation::InReplyTo,
room::message::{Relation::Reply, RoomMessageEventContent},
},
};
use crate::{
AppError, AppResult, config,
logging::{
capture,
capture::Capture,
fmt::{markdown_table, markdown_table_head},
},
utils::string::{collect_stream, common_prefix},
};
/// Tab-complete an admin console line against the clap command tree.
#[must_use]
pub(super) fn complete(line: &str) -> String {
    let root = AdminCommand::command();
    complete_command(root, line)
}
// Box the command-handling future so callers can store it behind the
// type-erased `ProcessorFuture` alias.
#[must_use]
pub(super) fn dispatch(command: CommandInput) -> ProcessorFuture {
    Box::pin(handle_command(command))
}
// Entry point for a single admin command: runs the processor and converts any
// panic into a user-visible error reply instead of tearing down the task.
#[tracing::instrument(skip_all, name = "admin")]
async fn handle_command(command: CommandInput) -> ProcessorResult {
    // AssertUnwindSafe: the command input is consumed afterwards only to
    // build the error reply, so observing post-panic state is acceptable.
    AssertUnwindSafe(Box::pin(process_command(&command)))
        .catch_unwind()
        .await
        .map_err(|e| AppError::public(format!("failed to process command: {e:?}")))
        .unwrap_or_else(|error| handle_panic(&error, &command))
}
/// Parse the input, run the command, and assemble the reply message from the
/// captured logs plus the command's own buffered output.
async fn process_command(input: &CommandInput) -> ProcessorResult {
    let (command, args, body) = match parse(input) {
        Err(error) => return Err(error),
        Ok(parsed) => parsed,
    };
    let context = Context {
        body: &body,
        timer: SystemTime::now(),
        reply_id: input.reply_id.as_deref(),
        output: BufWriter::new(Vec::new()).into(),
    };
    let (result, mut logs) = process(&context, command, &args).await;
    // Flush the buffered command output and recover it as a UTF-8 string;
    // `take` leaves an empty buffer behind rather than cloning.
    let output = &mut context.output.lock().await;
    output.flush().await.expect("final flush of output stream");
    let output =
        String::from_utf8(take(output.get_mut())).expect("invalid utf8 in command output stream");
    match result {
        // Success with no captured logs: reply with the output alone.
        Ok(()) if logs.is_empty() => Ok(Some(reply(
            RoomMessageEventContent::notice_markdown(output),
            context.reply_id,
        ))),
        // Success with logs: logs first, then the command output.
        Ok(()) => {
            logs.write_str(output.as_str()).expect("output buffer");
            Ok(Some(reply(
                RoomMessageEventContent::notice_markdown(logs),
                context.reply_id,
            )))
        }
        // Failure: append the error to the captured logs and reply with that.
        Err(error) => {
            write!(
                &mut logs,
                "Command failed with error:\n```\n{error:#?}\n```"
            )
            .expect("output buffer");
            Err(reply(
                RoomMessageEventContent::notice_markdown(logs),
                context.reply_id,
            ))
        }
    }
}
/// Turn a caught panic into a markdown notice for the admin room; the
/// original error is also logged server-side.
#[allow(clippy::result_large_err)]
fn handle_panic(error: &AppError, command: &CommandInput) -> ProcessorResult {
    error!("Panic while processing command: {error:?}");
    let link = "Please submit a [bug report](https://github.com/matrix-construct/tuwunel/issues/new). 🥺";
    let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
    Err(reply(
        RoomMessageEventContent::notice_markdown(msg),
        command.reply_id.as_deref(),
    ))
}
/// Parse and process a message from the admin room
///
/// Runs the command while capturing its log output; returns the command
/// result together with the captured-log string (possibly empty).
async fn process(
    context: &Context<'_>,
    command: AdminCommand,
    args: &[String],
) -> (AppResult<()>, String) {
    // Install the log capture for the duration of the command only.
    let (capture, logs) = capture_create(context);
    let capture_scope = capture.start();
    let result = Box::pin(crate::admin::process(command, context)).await;
    // End capturing before reading the log buffer below.
    drop(capture_scope);
    debug!(
        ok = result.is_ok(),
        elapsed = ?context.timer.elapsed(),
        command = ?args,
        "command processed"
    );
    let mut output = String::new();
    // Prepend the logs only if any were captured
    // (the markdown table header alone accounts for the first two lines).
    let logs = logs.lock().expect("locked");
    if logs.lines().count() > 2 {
        writeln!(&mut output, "{logs}").expect("failed to format logs to command output");
    }
    drop(logs);
    (result, output)
}
/// Build a log capture for one command invocation: events from our own
/// modules inside the "admin" scope, at or below the configured level, are
/// formatted as markdown table rows into the returned shared string buffer.
fn capture_create(_context: &Context<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
    let conf = config::get();
    let env_filter = EnvFilter::try_new(&conf.admin.log_capture).unwrap_or_else(|e| {
        warn!("admin_log_capture filter invalid: {e:?}");
        // Fall back to a sane default when the configured filter is invalid
        // (simpler than the previous then_some/or/map/expect chain, which
        // could never actually fail).
        let default = if cfg!(debug_assertions) { "debug" } else { "info" };
        default.into()
    });
    let log_level = env_filter
        .max_level_hint()
        .and_then(LevelFilter::into_level)
        .unwrap_or(Level::DEBUG);
    let filter = move |data: capture::Data<'_>| {
        data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin")
    };
    // Seed the buffer with the markdown table header so captured rows line up.
    let logs = Arc::new(Mutex::new(
        collect_stream(|s| markdown_table_head(s)).expect("markdown table header"),
    ));
    let capture = Capture::new(
        &crate::logging::get().capture,
        Some(filter),
        capture::fmt(markdown_table, logs.clone()),
    );
    (capture, logs)
}
/// Parse chat messages from the admin room into an AdminCommand object
///
/// Returns the parsed command, its argv tokens, and the remaining non-empty
/// body lines (e.g. fenced code blocks); parse failures become a reply
/// message for the admin room.
#[allow(clippy::result_large_err)]
fn parse(input: &CommandInput) -> Result<(AdminCommand, Vec<String>, Vec<&str>), CommandOutput> {
    // Drop blank lines; the first remaining line is the command itself.
    let lines = input.command.lines().filter(|line| !line.trim().is_empty());
    let command_line = lines.clone().next().expect("command missing first line");
    let body = lines.skip(1).collect();
    match parse_command(command_line) {
        Ok((command, args)) => Ok((command, args, body)),
        Err(error) => {
            // Clap's help text mentions the placeholder server name; swap in ours.
            let message = error
                .to_string()
                .replace("server.name", crate::config::server_name().as_str());
            Err(reply(
                RoomMessageEventContent::notice_plain(message),
                input.reply_id.as_deref(),
            ))
        }
    }
}
/// Tokenize one admin line and let clap parse the tokens into a command,
/// returning both the command and the argv it was parsed from.
fn parse_command(line: &str) -> Result<(AdminCommand, Vec<String>), clap::Error> {
    let argv = parse_line(line);
    AdminCommand::try_parse_from(&argv).map(|command| (command, argv))
}
/// Complete the trailing token of `line` against `cmd`'s subcommand tree,
/// returning the (possibly extended) line. Descends one subcommand level per
/// fully-matched token; stops at the first token needing completion.
fn complete_command(mut cmd: clap::Command, line: &str) -> String {
    let argv = parse_line(line);
    let mut ret = Vec::<String>::with_capacity(argv.len().saturating_add(1));
    // Skip argv[0] ("admin"); walk the remaining tokens.
    'token: for token in argv.into_iter().skip(1) {
        let cmd_ = cmd.clone();
        let mut choice = Vec::new();
        for sub in cmd_.get_subcommands() {
            let name = sub.get_name();
            if *name == token {
                // token already complete; recurse to subcommand
                ret.push(token);
                cmd.clone_from(sub);
                continue 'token;
            } else if name.starts_with(&token) {
                // partial match; add to choices
                choice.push(name);
            }
        }
        if choice.len() == 1 {
            // One choice. Add extra space because it's complete
            let choice = *choice.first().expect("only choice");
            ret.push(choice.to_owned());
            ret.push(String::new());
        } else if choice.is_empty() {
            // Nothing found, return original string
            ret.push(token);
        } else {
            // Find the common prefix
            ret.push(common_prefix(&choice).into());
        }
        // Return from completion
        return ret.join(" ");
    }
    // Return from no completion. Needs a space though.
    ret.push(String::new());
    ret.join(" ")
}
/// Tokenize an admin-room message line into an argv-style vector suitable
/// for clap (`AdminCommand::try_parse_from`).
fn parse_line(command_line: &str) -> Vec<String> {
    let mut argv = command_line
        .split_whitespace()
        .map(str::to_owned)
        .collect::<Vec<String>>();
    // Remove any escapes that came with a server-side escape command
    if !argv.is_empty() && argv[0].ends_with("admin") {
        argv[0] = argv[0].trim_start_matches('\\').into();
    }
    // The first token has to be "admin"; for console convenience insert it
    // when the input neither starts with it nor addresses a user (@...).
    if !argv.is_empty() && !argv[0].ends_with("admin") && !argv[0].starts_with('@') {
        argv.insert(0, "admin".to_owned());
    }
    // Replace `help command` with `command --help`
    // Clap has a help subcommand, but it omits the long help description.
    if argv.len() > 1 && argv[1] == "help" {
        argv.remove(1);
        argv.push("--help".to_owned());
    }
    // Backwards compatibility with `register_appservice`-style commands:
    // normalize underscores in the subcommand token to hyphens.
    if argv.len() > 1 && argv[1].contains('_') {
        argv[1] = argv[1].replace('_', "-");
    }
    // Same normalization for the sub-subcommand token.
    if argv.len() > 2 && argv[2].contains('_') {
        argv[2] = argv[2].replace('_', "-");
    }
    // For `query` commands, also normalize the database function/table
    // argument (note: this converts underscores TO hyphens).
    if argv.len() > 3 && argv[1].eq("query") {
        argv[3] = argv[3].replace('_', "-");
    }
    trace!(?command_line, ?argv, "parse");
    argv
}
fn reply(
mut content: RoomMessageEventContent,
reply_id: Option<&EventId>,
) -> RoomMessageEventContent {
content.relates_to = reply_id.map(|event_id| Reply {
in_reply_to: InReplyTo {
event_id: event_id.to_owned(),
},
});
content
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
// ==== file: crates/server/src/admin/federation.rs ====
use clap::Subcommand;
use crate::admin::{Context, RoomInfo, get_room_info};
use crate::core::{OwnedRoomId, OwnedServerName, OwnedUserId};
use crate::macros::admin_command_dispatch;
use crate::{AppError, AppResult, config, data, sending};
// Admin-console commands for federation management and diagnostics.
// NOTE: the `///` comments below are rendered as clap help text for admins.
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum FederationCommand {
    /// - Disables incoming federation handling for a room.
    DisableRoom { room_id: OwnedRoomId },
    /// - Enables incoming federation handling for a room again.
    EnableRoom { room_id: OwnedRoomId },
    /// - Fetch `/.well-known/matrix/support` from the specified server
    ///
    /// Despite the name, this is not a federation endpoint and does not go
    /// through the federation / server resolution process as per-spec this is
    /// supposed to be served at the server_name.
    ///
    /// Respecting homeservers put this file here for listing administration,
    /// moderation, and security inquiries. This command provides a way to
    /// easily fetch that information.
    FetchSupportWellKnown { server_name: OwnedServerName },
    /// - Lists all the rooms we share/track with the specified *remote* user
    RemoteUserInRooms { user_id: OwnedUserId },
}
/// Stop handling incoming federation traffic for the given room.
pub(super) async fn disable_room(ctx: &Context<'_>, room_id: OwnedRoomId) -> AppResult<()> {
    crate::room::disable_room(&room_id, true)?;
    ctx.write_str("Room disabled.").await
}
/// Resume handling incoming federation traffic for the given room.
pub(super) async fn enable_room(ctx: &Context<'_>, room_id: OwnedRoomId) -> AppResult<()> {
    crate::room::disable_room(&room_id, false)?;
    ctx.write_str("Room enabled.").await
}
/// Fetch `/.well-known/matrix/support` from a server and echo it back as
/// pretty-printed JSON.
///
/// This is deliberately a plain HTTPS GET against the server_name host:
/// per spec this document is served at the server name and does not go
/// through federation server resolution.
pub(super) async fn fetch_support_well_known(
    ctx: &Context<'_>,
    server_name: OwnedServerName,
) -> AppResult<()> {
    let response = sending::default_client()
        .get(format!("https://{server_name}/.well-known/matrix/support"))
        .send()
        .await?;
    let text = response.text().await?;
    if text.is_empty() {
        return Err(AppError::public("Response text/body is empty."));
    }
    // Keep responses small enough to relay sensibly into the admin room.
    if text.len() > 1500 {
        return Err(AppError::public(
            "Response text/body is over 1500 characters, assuming no support well-known.",
        ));
    }
    let json: serde_json::Value = serde_json::from_str(&text)
        .map_err(|_| AppError::public("Response text/body is not valid JSON."))?;
    // Pretty-printing an already-parsed Value should not fail; if it somehow
    // does, report that distinctly instead of blaming the remote's JSON
    // (the previous code reused the "not valid JSON" message here).
    let pretty_json = serde_json::to_string_pretty(&json)
        .map_err(|_| AppError::public("Failed to pretty-print JSON response."))?;
    ctx.write_str(&format!(
        "Got JSON response:\n\n```json\n{pretty_json}\n```"
    ))
    .await
}
/// List every room we share/track with the given *remote* user, largest
/// member count first.
pub(super) async fn remote_user_in_rooms(ctx: &Context<'_>, user_id: OwnedUserId) -> AppResult<()> {
    // Only meaningful for users on other homeservers; locals have their own command.
    if user_id.server_name() == config::server_name() {
        return Err(AppError::public(
            "User belongs to our server, please use `list-joined-rooms` user admin command \
            instead.",
        ));
    }
    if !data::user::user_exists(&user_id)? {
        return Err(AppError::public(
            "Remote user does not exist in our database.",
        ));
    }
    // Collect a summary for every shared room.
    let mut rooms: Vec<RoomInfo> = data::user::joined_rooms(&user_id)?
        .into_iter()
        .map(|room_id| get_room_info(&room_id))
        .collect();
    if rooms.is_empty() {
        return Err(AppError::public("User is not in any rooms."));
    }
    // Largest member count first; stable ascending sort followed by a reverse
    // keeps the original ordering among ties.
    rooms.sort_by_key(|r| r.joined_members);
    rooms.reverse();
    let num = rooms.len();
    let mut lines = Vec::with_capacity(num);
    for RoomInfo {
        id,
        joined_members,
        name,
    } in &rooms
    {
        lines.push(format!("{id} | Members: {joined_members} | Name: {name}"));
    }
    let body = lines.join("\n");
    ctx.write_str(&format!(
        "Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",
    ))
    .await
}
// ==== file: crates/server/src/admin/debug.rs ====
use std::{
collections::HashMap,
fmt::Write,
iter::once,
str::FromStr,
time::{Instant, SystemTime},
};
use futures_util::{FutureExt, StreamExt, TryStreamExt};
use serde::Serialize;
use tracing_subscriber::EnvFilter;
use crate::core::UnixMillis;
use crate::core::serde::{CanonicalJsonObject, CanonicalJsonValue, RawJsonValue};
use crate::core::{
EventId, OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
api::federation::event::get_room_state, events::AnyStateEvent,
};
use crate::{AppError, AppResult, admin::Context, config, info, event::PduEvent, room::timeline, utils};
/// Echo the arguments back to the admin room, joined by single spaces.
pub(super) async fn echo(ctx: &Context<'_>, message: Vec<String>) -> AppResult<()> {
    ctx.write_str(&message.join(" ")).await
}
/// Load an event's auth chain, reporting its length and how long the walk took.
pub(super) async fn get_auth_chain(ctx: &Context<'_>, event_id: OwnedEventId) -> AppResult<()> {
    // Load the event so we know which room's auth chain to walk.
    let Ok(Some(event)) = timeline::get_pdu_json(&event_id) else {
        return Err(AppError::public("Event not found."));
    };
    let room_id_str = event
        .get("room_id")
        .and_then(CanonicalJsonValue::as_str)
        // BUG FIX: `ok_or_else` must yield the error value itself; the
        // previous `|| Err(..)` wrapper produced a `Result` and didn't
        // type-check with `?`.
        .ok_or_else(|| AppError::public("Invalid event in database"))?;
    let room_id = <&RoomId>::try_from(room_id_str)
        // Same fix here: `map_err` maps to the error value, not `Err(..)`.
        .map_err(|_| AppError::public("Invalid room id field in event in database"))?;
    // Time the auth-chain walk for the report.
    let start = Instant::now();
    let count = crate::room::auth_chain::get_auth_chain_ids(room_id, once(event_id.as_ref())).len();
    let elapsed = start.elapsed();
    let out = format!("Loaded auth chain with length {count} in {elapsed:?}");
    ctx.write_str(&out).await
}
/// Parse a PDU JSON document from the command body, compute its reference
/// hash (used as the event ID), and echo the parsed event back.
pub(super) async fn parse_pdu(ctx: &Context<'_>) -> AppResult<()> {
    // The JSON must be fenced in a Markdown code block.
    if ctx.body.len() < 2 || !ctx.body[0].trim().starts_with("```") || ctx.body.last().unwrap_or(&"").trim() != "```"
    {
        return Err(AppError::public(
            "Expected code block in command body. Add --help for details.",
        ));
    }
    let string = ctx.body[1..ctx.body.len().saturating_sub(1)].join("\n");
    match serde_json::from_str(&string) {
        Err(e) => return Err(AppError::public(format!("Invalid json in command body: {e}"))),
        // NOTE(review): the reference hash is always computed with V6 rules
        // here, regardless of the event's actual room version — confirm this
        // is intended for a debug command.
        Ok(value) => match crate::core::signatures::reference_hash(&value, &RoomVersionId::V6) {
            Err(e) => return Err(AppError::public(format!("could not parse PDU json: {e:?}"))),
            Ok(hash) => {
                let event_id = EventId::parse(format!("${hash}"));
                match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
                    Err(e) => {
                        return Err(AppError::public(format!(
                            "EventId: {event_id:?}\nCould not parse event: {e}"
                        )));
                    }
                    Ok(pdu) => write!(ctx, "EventId: {event_id:?}\n{pdu:#?}"),
                }
            }
        },
    }
    .await
}
/// Fetch a PDU from our database by event ID, falling back to the outlier
/// (rejected / soft-failed) store when it is not in the accepted timeline,
/// and echo it back as pretty-printed JSON.
pub(super) async fn get_pdu(ctx: &Context<'_>, event_id: OwnedEventId) -> AppResult<()> {
    let mut outlier = false;
    let mut pdu_json = timeline::get_non_outlier_pdu_json(&event_id).await;
    if pdu_json.is_err() {
        // Not in the timeline; try the outlier store and flag it for the reply.
        outlier = true;
        pdu_json = timeline::get_pdu_json(&event_id);
    }
    match pdu_json {
        Err(_) => return Err(AppError::public("PDU not found locally.")),
        Ok(json) => {
            let text = serde_json::to_string_pretty(&json)?;
            let msg = if outlier {
                "Outlier (Rejected / Soft Failed) PDU found in our database"
            } else {
                "PDU found in our database"
            };
            write!(ctx, "{msg}\n```json\n{text}\n```",)
        }
    }
    .await
}
/// Fetch a code-block list of event IDs from a remote server, one by one.
///
/// With `force`, failures are logged and counted but do not abort the batch;
/// otherwise the first failure aborts. Ends with a success/failure summary.
pub(super) async fn fetch_remote_pdu_list(ctx: &Context<'_>, server: OwnedServerName, force: bool) -> AppResult<()> {
    let conf = config::get();
    if conf.enabled_federation().is_none() {
        return Err(AppError::public("federation is disabled on this homeserver."));
    }
    if server == config::server_name() {
        return Err(AppError::public(
            "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
            fetching local PDUs from the database.",
        ));
    }
    // The event-ID list must be fenced in a Markdown code block.
    if ctx.body.len() < 2 || !ctx.body[0].trim().starts_with("```") || ctx.body.last().unwrap_or(&"").trim() != "```"
    {
        return Err(AppError::public(
            "Expected code block in command body. Add --help for details.",
        ));
    }
    // Parse every line between the fences as an event ID, skipping invalid
    // ones. Iterating the slice directly avoids the intermediate Vec the
    // previous collect-then-drain dance allocated.
    let list = ctx.body[1..ctx.body.len().saturating_sub(1)]
        .iter()
        .filter_map(|pdu| EventId::parse(pdu).ok())
        .collect::<Vec<_>>();
    let mut failed_count: usize = 0;
    let mut success_count: usize = 0;
    for event_id in list {
        if force {
            // Best-effort mode: report and count failures but keep going.
            match fetch_remote_pdu(ctx, event_id.to_owned(), server.clone()).await {
                Err(e) => {
                    failed_count = failed_count.saturating_add(1);
                    crate::admin::send_text(&format!("Failed to get remote PDU, ignoring error: {e}")).await;
                    warn!("Failed to get remote PDU, ignoring error: {e}");
                }
                _ => {
                    success_count = success_count.saturating_add(1);
                }
            }
        } else {
            fetch_remote_pdu(ctx, event_id.to_owned(), server.clone()).await?;
            success_count = success_count.saturating_add(1);
        }
    }
    let out = format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures");
    ctx.write_str(&out).await
}
/// Fetch a single PDU from a remote server and handle it as a backfilled
/// event.
///
/// NOTE(review): only the guard checks are implemented; the federation
/// request itself is still TODO — the commented scaffold below is the
/// intended implementation.
pub(super) async fn fetch_remote_pdu(
    ctx: &Context<'_>,
    event_id: OwnedEventId,
    server: OwnedServerName,
) -> AppResult<()> {
    let conf = config::get();
    if conf.enabled_federation().is_none() {
        return Err(AppError::public("Federation is disabled on this homeserver."));
    }
    if server == config::server_name() {
        // Plain string literal: the previous `format!` had no placeholders
        // (clippy: useless_format).
        return Err(AppError::public(
            "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
            fetching local PDUs.",
        ));
    }
    unimplemented!()
    // match self
    //     .services
    //     .sending
    //     .send_federation_request(
    //         &server,
    //         crate::core::api::federation::event::get_event::v1::Request {
    //             event_id: event_id.clone(),
    //             include_unredacted_content: None,
    //         },
    //     )
    //     .await
    // {
    //     Err(e) => {
    //         return Err(AppError::public(format!(
    //             "Remote server did not have PDU or failed sending request to remote server: {e}"
    //         )));
    //     }
    //     Ok(response) => {
    //         let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| {
    //             warn!(
    //                 "Requested event ID {event_id} from server but failed to convert from \
    //                  RawValue to CanonicalJsonObject (malformed event/response?): {e}"
    //             );
    //             AppError::public("Received response from server but failed to parse PDU")
    //         })?;
    //         trace!("Attempting to parse PDU: {:?}", &response.pdu);
    //         let _parsed_pdu = {
    //             let parsed_result = crate::parse_incoming_pdu(&response.pdu)?;
    //             let (event_id, value, room_id) = match parsed_result {
    //                 Ok(t) => t,
    //                 Err(e) => {
    //                     warn!("Failed to parse PDU: {e}");
    //                     info!("Full PDU: {:?}", &response.pdu);
    //                     return Err(AppError::public(format!(
    //                         "Failed to parse PDU remote server {server} sent us: {e}"
    //                     )));
    //                 }
    //             };
    //             vec![(event_id, value, room_id)]
    //         };
    //         info!("Attempting to handle event ID {event_id} as backfilled PDU");
    //         timeline::backfill_pdu(&server, response.pdu).await?;
    //         let text = serde_json::to_string_pretty(&json)?;
    //         let msg = "Got PDU from specified server and handled as backfilled";
    //         write!(ctx, "{msg}. Event body:\n```json\n{text}\n```")
    //     }
    // }
    // .await
}
/// Dump the full current state of a room as pretty-printed JSON.
///
/// NOTE(review): not yet wired up — the commented scaffold below is the
/// intended implementation.
pub(super) async fn get_room_state(ctx: &Context<'_>, room: OwnedRoomOrAliasId) -> AppResult<()> {
    // TODO: admin
    unimplemented!();
    // let room_id = crate::room::alias::resolve(&room).await?;
    // let room_state: Vec<RawJson<AnyStateEvent>> = crate::room::state::room_state_full_pdus(&room_id)
    //     .map_ok(Event::into_format)
    //     .try_collect()
    //     .await?;
    // if room_state.is_empty() {
    //     return Err(AppError::public(
    //         "Unable to find room state in our database (vector is empty)",
    //     ));
    // }
    // let json = serde_json::to_string_pretty(&room_state).map_err(|e| {
    //     AppError::public(format!(
    //         "Failed to convert room state events to pretty JSON, possible invalid room state \
    //          events in our database {e}",
    //     ))
    // })?;
    // let out = format!("```json\n{json}\n```");
    // ctx.write_str(&out).await
}
/// Ping a remote server via the federation version endpoint and report the
/// round-trip time.
///
/// NOTE(review): not yet wired up — `timer` feeds the commented scaffold
/// below, which is the intended implementation.
pub(super) async fn ping(ctx: &Context<'_>, server: OwnedServerName) -> AppResult<()> {
    if server == config::server_name() {
        return Err(AppError::public(
            "Not allowed to send federation requests to ourselves.",
        ));
    }
    let timer = tokio::time::Instant::now();
    unimplemented!()
    // match self
    //     .services
    //     .sending
    //     .send_federation_request(
    //         &server,
    //         crate::core::api::federation::discovery::get_server_version::v1::Request {},
    //     )
    //     .await
    // {
    //     Err(e) => {
    //         return Err(AppError::public(format!(
    //             "Failed sending federation request to specified server:\n\n{e}"
    //         )));
    //     }
    //     Ok(response) => {
    //         let ping_time = timer.elapsed();
    //         let json_text_res = serde_json::to_string_pretty(&response.server);
    //         let out = if let Ok(json) = json_text_res {
    //             format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```")
    //         } else {
    //             format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}")
    //         };
    //         write!(ctx, "{out}")
    //     }
    // }
    // .await
}
/// Flag every known user's devices as having updated E2EE keys so that
/// clients re-fetch them; per-user failures are logged but do not abort.
pub(super) async fn force_device_list_updates(ctx: &Context<'_>) -> AppResult<()> {
    // Force E2EE device list updates for all users
    for user_id in crate::data::user::all_user_ids() {
        if let Err(e) = crate::user::mark_device_key_update(user_id) {
            warn!("Failed to mark device key update for user {user_id}: {e}");
        }
    }
    write!(ctx, "Marked all devices for all users as having new keys to update").await
}
/// Change (or reset) the global tracing log filter at runtime.
///
/// NOTE(review): the actual reload is currently disabled (see the commented
/// workarounds below), so this always ends in an error; the filter parsing
/// is still validated.
pub(super) async fn change_log_level(ctx: &Context<'_>, filter: Option<String>, reset: bool) -> AppResult<()> {
    let handles = &["console"];
    let conf = config::get();
    if reset {
        // Validate the configured level even though the reload is disabled.
        let old_filter_layer = match EnvFilter::try_new(&conf.logger.level) {
            Ok(s) => s,
            Err(e) => {
                return Err(AppError::public(format!(
                    "Log level from config appears to be invalid now: {e}"
                )));
            }
        };
        // TODO: This is a workaround for the fact that we cannot reload the logger
        // match crate::config::get().logger.reload(&old_filter_layer, Some(handles)) {
        //     Err(e) => {
        //         return Err(AppError::public(format!(
        //             "Failed to modify and reload the global tracing log level: {e}"
        //         )));
        //     }
        //     Ok(()) => {
        //         let value = &conf.logger.level;
        //         let out = format!("Successfully changed log level back to config value {value}");
        //         return ctx.write_str(&out).await;
        //     }
        // }
    }
    // TODO: This is a workaround for the fact that we cannot reload the logger
    // if let Some(filter) = filter {
    //     let new_filter_layer = match EnvFilter::try_new(filter) {
    //         Ok(s) => s,
    //         Err(e) => return Err(AppError::public(format!("Invalid log level filter specified: {e}"))),
    //     };
    //     match self.services.server.log.reload.reload(&new_filter_layer, Some(handles)) {
    //         Ok(()) => return ctx.write_str("Successfully changed log level").await,
    //         Err(e) => {
    //             return Err(AppError::public(format!(
    //                 "Failed to modify and reload the global tracing log level: {e}"
    //             )));
    //         }
    //     }
    // }
    Err(AppError::public("No log level was specified."))
}
/// Signs a JSON object, supplied in a fenced code block in the command body,
/// with this server's signing keys and prints the signed result.
pub(super) async fn sign_json(ctx: &Context<'_>) -> AppResult<()> {
    // Payload must be fenced: first body line opens with ``` and the last line is ```.
    if ctx.body.len() < 2 || !ctx.body[0].trim().starts_with("```") || ctx.body.last().unwrap_or(&"").trim() != "```" {
        return Err(AppError::public(
            "Expected code block in command body. Add --help for details.",
        ));
    }
    // Join everything between the fences. checked_sub cannot fail because
    // len >= 2 was verified above.
    let string = ctx.body[1..ctx.body.len().checked_sub(1).unwrap()].join("\n");
    match serde_json::from_str(&string) {
        Err(e) => return Err(AppError::public(format!("invalid json: {e}"))),
        Ok(mut value) => {
            // Sign in place, then pretty-print the signed object back to the admin.
            crate::server_key::sign_json(&mut value)?;
            let json_text = serde_json::to_string_pretty(&value)?;
            write!(ctx, "{json_text}")
        }
    }
    .await
}
/// Verifies the signatures of a JSON object, supplied in a fenced code block
/// in the command body, against the given room version's rules.
pub(super) async fn verify_json(ctx: &Context<'_>, room_version: &RoomVersionId) -> AppResult<()> {
    // Payload must be fenced: first body line opens with ``` and the last line is ```.
    if ctx.body.len() < 2 || !ctx.body[0].trim().starts_with("```") || ctx.body.last().unwrap_or(&"").trim() != "```" {
        return Err(AppError::public(
            "Expected code block in command body. Add --help for details.",
        ));
    }
    // Join everything between the fences; checked_sub is safe because len >= 2.
    let string = ctx.body[1..ctx.body.len().checked_sub(1).unwrap()].join("\n");
    match serde_json::from_str::<CanonicalJsonObject>(&string) {
        Err(e) => return Err(AppError::public(format!("invalid json: {e}"))),
        Ok(value) => match crate::server_key::verify_json(&value, room_version).await {
            Err(e) => return Err(AppError::public(format!("signature verification failed: {e}"))),
            Ok(()) => write!(ctx, "Signature correct"),
        },
    }
    .await
}
/// Verifies the signatures and content hash of a locally stored PDU.
pub(super) async fn verify_pdu(ctx: &Context<'_>, event_id: OwnedEventId, room_version: &RoomVersionId) -> AppResult<()> {
    use crate::core::signatures::Verified;
    let Some(mut event) = timeline::get_pdu_json(&event_id)? else {
        return Err(AppError::public("pdu not found in our database."));
    };
    // The signature/hash check is performed over the event without its id field.
    event.remove("event_id");
    let msg = match crate::server_key::verify_event(&event, room_version).await {
        Err(e) => return Err(e),
        // Signatures valid but content hash mismatched: the stored copy was redacted.
        Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
        Ok(Verified::All) => "signatures and hashes OK.",
    };
    ctx.write_str(msg).await
}
/// Displays the first PDU of a room we participate in.
///
/// NOTE(review): currently a stub — after the membership check this panics via
/// `unimplemented!()`; the intended implementation is sketched in the comment.
pub(super) async fn first_pdu_in_room(ctx: &Context<'_>, room_id: OwnedRoomId) -> AppResult<()> {
    if !crate::room::is_server_joined(config::server_name(), &room_id)? {
        return Err(AppError::public(
            "We are not participating in the room / we don't know about the room ID.",
        ));
    }
    unimplemented!()
    // let first_pdu = timeline::first_pdu_in_room(&room_id)
    //     .await
    //     .map_err(|_| AppError::public("Failed to find the first PDU in database"))?;
    // let out = format!("{first_pdu:?}");
    // ctx.write_str(&out).await
}
/// Displays the latest PDU of a room we participate in.
pub(super) async fn latest_pdu_in_room(ctx: &Context<'_>, room_id: OwnedRoomId) -> AppResult<()> {
    if !crate::room::is_server_joined(config::server_name(), &room_id)? {
        return Err(AppError::public(
            "We are not participating in the room / we don't know about the room ID.",
        ));
    }
    // NOTE(review): `?` followed by `.map_err(..)?` only type-checks if
    // `latest_pdu_in_room` returns a nested Result/Option — this looks like a
    // leftover from the former `.await.map_err(..)` form; verify.
    let latest_pdu = timeline::latest_pdu_in_room(&room_id)?
        .map_err(|_| AppError::public("Failed to find the latest PDU in database"))?;
    let out = format!("{latest_pdu:?}");
    ctx.write_str(&out).await
}
/// Forcibly replaces our stored state for `room_id` with the state reported
/// by `server_name` via the federation `/state` endpoint.
///
/// NOTE(review): this function is mid-port — it references `self.services`
/// although it is a free function, the parsed PDU results in the first loop
/// are discarded, and `state` stays empty because the shortstatekey insertion
/// is commented out. Confirm intent before enabling.
pub(super) async fn force_set_room_state_from_server(
    ctx: &Context<'_>,
    room_id: OwnedRoomId,
    server_name: OwnedServerName,
) -> AppResult<()> {
    if !crate::room::is_server_joined(config::server_name(), &room_id)? {
        return Err(AppError::public(
            "We are not participating in the room / we don't know about the room ID.",
        ));
    }
    // Use our latest known PDU as the anchor event for the remote /state query.
    let first_pdu = timeline::latest_pdu_in_room(&room_id)
        .await
        .map_err(|_| AppError::public("Failed to find the latest PDU in database"))?;
    let room_version = crate::room::get_version(&room_id)?;
    // shortstatekey -> event id for the state to be forced.
    let mut state: HashMap<u64, OwnedEventId> = HashMap::new();
    let remote_state_response = self
        .services
        .sending
        .send_federation_request(
            &server_name,
            get_room_state::v1::Request {
                room_id: room_id.clone(),
                event_id: first_pdu.event_id().to_owned(),
            },
        )
        .await?;
    // Pre-parse pass: only logs unparsable PDUs; the Ok value is discarded.
    for pdu in remote_state_response.pdus.clone() {
        match crate::parse_incoming_pdu(&pdu) {
            Ok(t) => t,
            Err(e) => {
                warn!("could not parse PDU, ignoring: {e}");
                continue;
            }
        };
    }
    info!("Going through room_state response PDUs");
    for result in remote_state_response
        .pdus
        .iter()
        .map(|pdu| crate::server_key::validate_and_add_event_id(pdu, &room_version))
    {
        // Skip PDUs that fail signature validation.
        let Ok((event_id, value)) = result.await else {
            continue;
        };
        let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| {
            error!("invalid pdu in fetching remote room state PDUs response: {value:#?}");
            AppError::public(format!("invalid pdu in send_join response: {e:?}"))
        })?;
        // TODO: admin
        // self.services.rooms.outlier.add_pdu_outlier(&event_id, &value);
        // if let Some(state_key) = &pdu.state_key {
        //     let shortstatekey = self
        //         .services
        //         .rooms
        //         .short
        //         .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)
        //         .await;
        //     state.insert(shortstatekey, pdu.event_id.clone());
        // }
    }
    info!("Going through auth_chain response");
    // TODO: admin
    // for result in remote_state_response
    //     .auth_chain
    //     .iter()
    //     .map(|pdu| crate::server_key::validate_and_add_event_id(pdu, &room_version))
    // {
    //     let Ok((event_id, value)) = result.await else {
    //         continue;
    //     };
    //     self.services.rooms.outlier.add_pdu_outlier(&event_id, &value);
    // }
    let new_room_state = crate::event::handler::resolve_state(&room_id, &room_version, state).await?;
    info!("Forcing new room state");
    let HashSetCompressStateEvent {
        shortstatehash: short_state_hash,
        added,
        removed,
    } = crate::room::state::save_state(room_id.clone().as_ref(), new_room_state)?;
    // Hold the room state lock while forcing the new state snapshot.
    let state_lock = crate::room::lock_state(&*room_id).await;
    crate::room::state::force_state(room_id.clone().as_ref(), short_state_hash, added, removed)?;
    info!(
        "Updating joined counts for room just in case (e.g. we may have found a difference in \
        the room's m.room.member state"
    );
    crate::room::update_currents(&room_id)?;
    drop(state_lock);
    ctx.write_str("Successfully forced the room state from the requested remote server.")
        .await
}
/// Fetches and displays signing keys for a server.
///
/// When `notary` is given, the notary server is asked about the target.
/// Otherwise the target is either queried directly (`query`) or the locally
/// known keys are shown.
pub(super) async fn get_signing_keys(
    ctx: &Context<'_>,
    server_name: Option<OwnedServerName>,
    notary: Option<OwnedServerName>,
    query: bool,
) -> AppResult<()> {
    let server_name = server_name.unwrap_or_else(|| config::server_name().to_owned());

    // A notary takes precedence over direct/local lookup.
    if let Some(notary) = notary {
        let signing_keys = crate::server_key::notary_request(&notary, &server_name).await?;
        return ctx
            .write_str(&format!("```rs\n{signing_keys:#?}\n```"))
            .await;
    }

    let signing_keys = match query {
        true => crate::server_key::server_request(&server_name).await?,
        false => crate::server_key::signing_keys_for(&server_name).await?,
    };
    ctx.write_str(&format!("```rs\n{signing_keys:#?}\n```"))
        .await
}
/// Renders the verify keys for a server as a markdown table.
pub(super) async fn get_verify_keys(ctx: &Context<'_>, server_name: Option<OwnedServerName>) -> AppResult<()> {
    let server_name = server_name.unwrap_or_else(|| config::server_name().to_owned());
    let keys = crate::server_key::verify_keys_for(&server_name);

    // Build the markdown table; appending to a String cannot fail.
    let mut out = String::from("| Key ID | Public Key |\n| --- | --- |\n");
    for (key_id, key) in keys {
        out.push_str(&format!("| {key_id} | {key:?} |\n"));
    }
    ctx.write_str(&out).await
}
/// Resolves the actual federation destination for a remote server name
/// (following well-known/SRV delegation), optionally bypassing the cache.
///
/// NOTE(review): references `self.services` although this is a free function —
/// looks like an unfinished port; verify before use.
pub(super) async fn resolve_true_destination(
    ctx: &Context<'_>,
    server_name: OwnedServerName,
    no_cache: bool,
) -> AppResult<()> {
    let conf = config::get();
    if conf.enabled_federation().is_none() {
        return Err(AppError::public("Federation is disabled on this homeserver."));
    }
    if server_name == config::server_name() {
        return Err(AppError::public(
            "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
            fetching local PDUs.",
        ));
    }
    let actual = self
        .services
        .resolver
        .resolve_actual_dest(&server_name, !no_cache)
        .await?;
    let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host);
    ctx.write_str(&msg).await
}
/// Prints the current system time, formatted with the "%+" pattern.
pub(super) async fn time(ctx: &Context<'_>) -> AppResult<()> {
    let formatted = utils::time::format(SystemTime::now(), "%+");
    ctx.write_str(&formatted).await
}
/// Lists the crate's build dependencies.
///
/// With `names` set, prints only a space-separated list of names; otherwise
/// renders a markdown table of name, version requirement and features.
pub(super) async fn list_dependencies(ctx: &Context<'_>, names: bool) -> AppResult<()> {
    if names {
        let out = info::cargo::dependencies_names().join(" ");
        return ctx.write_str(&out).await;
    }
    let mut out = String::new();
    let deps = info::cargo::dependencies();
    writeln!(out, "| name | version | features |")?;
    writeln!(out, "| ---- | ------- | -------- |")?;
    for (name, dep) in deps {
        // "*" when the dependency has no explicit version requirement.
        let version = dep.try_req().unwrap_or("*");
        let feats = dep.req_features();
        let feats = if !feats.is_empty() {
            feats.join(" ")
        } else {
            String::new()
        };
        writeln!(out, "| {name} | {version} | {feats} |")?;
    }
    ctx.write_str(&out).await
}
/// Creates and prints a signed JWT for `user` using the configured HMAC secret.
///
/// `exp_from_now` / `nbf_from_now` are offsets in seconds from now; when
/// absent (or on overflow/conversion failure) `exp` defaults to `usize::MAX`
/// (never expires) and `nbf` to 0 (valid immediately).
pub(super) async fn create_jwt(
    ctx: &Context<'_>,
    user: String,
    exp_from_now: Option<u64>,
    nbf_from_now: Option<u64>,
    issuer: Option<String>,
    audience: Option<String>,
) -> AppResult<()> {
    use jwt::{Algorithm, EncodingKey, Header, encode};
    // Claim set serialized into the token payload.
    #[derive(Serialize)]
    struct Claim {
        sub: String,
        iss: String,
        aud: String,
        exp: usize,
        nbf: usize,
    }
    let conf = config::get();
    let Some(jwt_conf) = conf.enabled_jwt() else {
        return Err(AppError::public("JWT is not enabled in the configuration"));
    };
    // Only symmetric (HMAC) keys are supported by this command.
    if jwt_conf.format.as_str() != "HMAC" {
        return Err(AppError::public(format!(
            "This command only supports HMAC key format, not {}.",
            jwt_conf.format
        )));
    }
    let key = EncodingKey::from_secret(jwt_conf.secret.as_ref());
    let alg = Algorithm::from_str(jwt_conf.algorithm.as_str())
        .map_err(|e| AppError::public(format!("JWT algorithm is not recognized or configured {e}")))?;
    let header = Header {
        alg,
        ..Default::default()
    };
    let claim = Claim {
        sub: user,
        iss: issuer.unwrap_or_default(),
        aud: audience.unwrap_or_default(),
        // now + offset, saturating to usize::MAX on overflow or missing offset.
        exp: exp_from_now
            .and_then(|val| UnixMillis::now().as_secs().checked_add(val))
            .map(TryInto::try_into)
            .and_then(Result::ok)
            .unwrap_or(usize::MAX),
        // now + offset, defaulting to 0 (immediately valid).
        nbf: nbf_from_now
            .and_then(|val| UnixMillis::now().as_secs().checked_add(val))
            .map(TryInto::try_into)
            .and_then(Result::ok)
            .unwrap_or(0),
    };
    // Encode, then write the token to the admin context (async closure in map).
    encode(&header, &claim, &key)
        .map_err(|e| AppError::public(format!("Failed to encode JWT: {e}")))
        .map(async |token| ctx.write_str(&token).await)?
        .await
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/utils.rs | crates/server/src/admin/utils.rs | #![allow(dead_code)]
use crate::core::{OwnedUserId, UserId};
use crate::{AppError, AppResult, IsRemoteOrLocal, config, data};
/// Escapes the HTML metacharacters `&`, `<` and `>` in `s`.
///
/// Bug fix: the replacements were identity mappings (`'&'` -> `"&"` etc.),
/// so nothing was actually escaped. `&` must be handled first so the
/// entities introduced for `<` and `>` are not double-escaped.
pub(crate) fn escape_html(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
}
/// Parses a Matrix user ID, lower-casing the input and defaulting the server
/// part to our own server name when none is given.
pub(crate) fn parse_user_id(user_id: &str) -> AppResult<OwnedUserId> {
    let lowered = user_id.to_lowercase();
    UserId::parse_with_server_name(lowered, config::server_name()).map_err(|e| {
        AppError::public(format!(
            "the supplied username is not a valid username: {e}"
        ))
    })
}
/// Parses user ID as our local user
///
/// Accepts the same inputs as [`parse_user_id`] but additionally requires the
/// resulting user ID to belong to this homeserver.
pub(crate) fn parse_local_user_id(user_id: &str) -> AppResult<OwnedUserId> {
    let user_id = parse_user_id(user_id)?;
    if !user_id.is_local() {
        // Bug fix: the message was a plain string literal, so `{user_id:?}`
        // was printed verbatim instead of being interpolated.
        return Err(AppError::public(format!(
            "user {user_id:?} does not belong to our server."
        )));
    }
    Ok(user_id)
}
/// Parses user ID that is an active (not guest or deactivated) local user
pub(crate) async fn parse_active_local_user_id(user_id: &str) -> AppResult<OwnedUserId> {
    let user_id = parse_local_user_id(user_id)?;
    if !data::user::user_exists(&user_id)? {
        // Bug fix: both messages below were plain literals, so `{user_id:?}`
        // was never interpolated; wrap in `format!`.
        return Err(AppError::public(format!(
            "user {user_id:?} does not exist on this server."
        )));
    }
    if data::user::is_deactivated(&user_id)? {
        return Err(AppError::public(format!("user {user_id:?} is deactivated.")));
    }
    Ok(user_id)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/executor.rs | crates/server/src/admin/executor.rs | use std::sync::{Arc, RwLock as StdRwLock};
use clap::Parser;
use std::sync::OnceLock;
use tokio::sync::{RwLock, broadcast, mpsc};
use crate::admin::{
AdminCommand, CommandInput, Completer, Console, Processor, ProcessorResult, processor,
};
use crate::core::events::room::message::Relation;
use crate::core::events::room::message::RoomMessageEventContent;
use crate::core::identifiers::*;
use crate::room::timeline;
use crate::{AppError, AppResult, PduBuilder, RoomMutexGuard, config};
/// Process-wide singleton holding the admin command [`Executor`].
pub static EXECUTOR: OnceLock<Executor> = OnceLock::new();

/// Returns the global executor.
///
/// # Panics
/// Panics if [`init`] has not been called yet.
pub fn executor() -> &'static Executor {
    EXECUTOR.get().expect("executor not initialized")
}

/// Builds the executor, installs the default command processor and
/// tab-completer, and publishes it into [`EXECUTOR`].
///
/// # Panics
/// Panics if called more than once (the singleton is already set).
pub async fn init() {
    let exec = Executor {
        signal: broadcast::channel::<&'static str>(1).0,
        channel: StdRwLock::new(None),
        handle: RwLock::new(None),
        complete: StdRwLock::new(None),
        console: Console::new(),
    };
    _ = exec
        .complete
        .write()
        .expect("locked for writing")
        .insert(processor::complete);
    _ = exec.handle.write().await.insert(processor::dispatch);
    EXECUTOR.set(exec).expect("executor already initialized");
}
/// Admin command executor: owns the command queue, the processor hook, the
/// tab-completer and the interactive console.
pub struct Executor {
    // Broadcast used to deliver process signals (e.g. SIGUSR2) to listeners.
    pub signal: broadcast::Sender<&'static str>,
    // Queue into the worker task; None while the worker is not running.
    pub channel: StdRwLock<Option<mpsc::Sender<CommandInput>>>,
    // Command processor entry point; None until the admin module is loaded.
    pub handle: RwLock<Option<Processor>>,
    // Tab-completion hook; None when unavailable.
    pub complete: StdRwLock<Option<Completer>>,
    // Interactive terminal console.
    pub console: Arc<Console>,
}
// Manual Debug impl: contained channels/function hooks are not Debug.
impl std::fmt::Debug for Executor {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Executor").finish()
    }
}
impl Executor {
    /// Reacts to a process signal: SIGUSR2 runs the configured commands, and
    /// every signal is forwarded to the console.
    pub(super) async fn handle_signal(&self, sig: &'static str) {
        if sig == SIGNAL {
            self.signal_execute().await.ok();
        }
        self.console.handle_signal(sig).await;
    }
    /// Runs all admin commands configured under `admin.signal_execute`.
    pub(super) async fn signal_execute(&self) -> AppResult<()> {
        let conf = config::get();
        // List of commands to execute
        let commands = conf.admin.signal_execute.clone();
        // When true, errors are ignored and execution continues.
        let ignore_errors = conf.admin.execute_errors_ignore;
        for (i, command) in commands.iter().enumerate() {
            if let Err(e) = self.execute_command(i, command.clone()).await
                && !ignore_errors
            {
                return Err(e);
            }
            // Cooperate with the scheduler between commands.
            tokio::task::yield_now().await;
        }
        Ok(())
    }
    /// Posts a command to the command processor queue and returns. Processing
    /// will take place on the service worker's task asynchronously. Errors if
    /// the queue is full.
    pub async fn command(&self, command: String, reply_id: Option<OwnedEventId>) -> AppResult<()> {
        let Some(sender) = self.channel.read().expect("locked for reading").clone() else {
            return Err(AppError::public("admin command queue unavailable"));
        };
        sender
            .send(CommandInput { command, reply_id })
            .await
            .map_err(|e| AppError::Public(format!("failed to enqueue admin command: {e:?}")))
    }
    /// Runs one configured command in place and routes its output/error to
    /// the console.
    async fn execute_command(&self, i: usize, command: String) -> AppResult<()> {
        debug!("Execute command #{i}: executing {command:?}");
        match self.command_in_place(command, None).await {
            Ok(Some(output)) => Self::execute_command_output(i, &output),
            Err(output) => Self::execute_command_error(i, &output),
            Ok(None) => {
                info!("Execute command #{i} completed (no output).");
                Ok(())
            }
        }
    }
    /// Dispatches a command to the processor on the current task and waits for
    /// completion.
    pub async fn command_in_place(
        &self,
        command: String,
        reply_id: Option<OwnedEventId>,
    ) -> ProcessorResult {
        self.process_command(CommandInput { command, reply_id })
            .await
    }
    // Prints a successful command's output to the console.
    fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> AppResult<()> {
        info!("Execute command #{i} completed:");
        super::console::print(content.body());
        Ok(())
    }
    // Prints a failed command's output and propagates the failure.
    fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> AppResult<()> {
        super::console::print_err(content.body());
        error!("Execute command #{i} failed.");
        Err(AppError::Public(format!(
            "Execute command #{i} failed: {}",
            content.body()
        )))
    }
    /// Processes one queued command and sends any response back to the room.
    ///
    /// NOTE(review): `.unwrap()` here panics the worker if responding fails —
    /// consider logging instead.
    pub(super) async fn handle_command(&self, command: CommandInput) {
        match self.process_command(command).await {
            Ok(None) => debug!("Command successful with no response"),
            Ok(Some(output)) | Err(output) => self.handle_response(output).await.unwrap(),
        }
    }
    /// Invokes the tab-completer to complete the command. When unavailable,
    /// None is returned.
    pub fn complete_command(&self, command: &str) -> Option<String> {
        self.complete
            .read()
            .expect("locked for reading")
            .map(|complete| complete(command))
    }
    // Looks up the installed processor and runs the command through it.
    async fn process_command(&self, command: CommandInput) -> ProcessorResult {
        let handle = &self
            .handle
            .read()
            .await
            .expect("Admin module is not loaded");
        handle(command).await
    }
    // Routes a command response back to the room the command was issued in,
    // as the server user inside the admin room and as the original sender
    // elsewhere.
    async fn handle_response(&self, content: RoomMessageEventContent) -> AppResult<()> {
        let Some(Relation::Reply { in_reply_to }) = content.relates_to.as_ref() else {
            return Ok(());
        };
        let Ok(pdu) = timeline::get_pdu(&in_reply_to.event_id) else {
            error!(
                event_id = ?in_reply_to.event_id,
                "Missing admin command in_reply_to event"
            );
            return Ok(());
        };
        let response_sender = if crate::room::is_admin_room(&pdu.room_id)? {
            config::server_user_id()
        } else {
            &pdu.sender
        };
        respond_to_room(content, &pdu.room_id, response_sender).await
    }
    /// Stops the console and drops the command queue sender.
    pub(super) async fn interrupt(&self) {
        //TODO: not unwind safe
        self.console.interrupt();
        _ = self.channel.write().expect("locked for writing").take();
        self.console.close().await;
    }
}
/// Sends markdown notice to the admin room as the admin user.
pub async fn send_notice(body: &str) -> AppResult<()> {
    send_message(RoomMessageEventContent::notice_markdown(body)).await
}
/// Sends markdown message (not an m.notice for notification reasons) to the
/// admin room as the admin user.
pub async fn send_text(body: &str) -> AppResult<()> {
    send_message(RoomMessageEventContent::text_markdown(body)).await
}
/// Sends a message to the admin room as the admin user (see send_text() for
/// convenience).
pub async fn send_message(message_content: RoomMessageEventContent) -> AppResult<()> {
    let user_id = &config::server_user_id();
    let room_id = crate::room::get_admin_room()?;
    respond_to_room(message_content, &room_id, user_id).await
}
// Signal that triggers `Executor::signal_execute` (see `handle_signal`).
pub(super) const SIGNAL: &str = "SIGUSR2";
// Parse chat messages from the admin room into an AdminCommand object
fn parse_admin_command(command_line: &str) -> std::result::Result<AdminCommand, String> {
    // Note: argv[0] is `@palpo:servername:`, which is treated as the main command
    let mut argv: Vec<String> = command_line
        .split_whitespace()
        .map(str::to_owned)
        .collect();

    // Replace `help command` with `command --help`
    // Clap has a help subcommand, but it omits the long help description.
    if argv.len() > 1 && argv[1] == "help" {
        argv.remove(1);
        argv.push("--help".to_owned());
    }

    // Backwards compatibility with `register_appservice`-style commands
    if argv.len() > 1 && argv[1].contains('_') {
        argv[1] = argv[1].replace('_', "-");
    }

    AdminCommand::try_parse_from(argv).map_err(|error| error.to_string())
}
// Appends `content` as a timeline event in `room_id`, sent by `user_id`.
// Only valid for the admin room; on append failure a fallback error message
// is posted instead.
async fn respond_to_room(
    content: RoomMessageEventContent,
    room_id: &RoomId,
    user_id: &UserId,
) -> AppResult<()> {
    assert!(crate::room::is_admin_room(room_id)?, "sender is not admin");
    // Hold the room state lock for the whole append.
    let state_lock = crate::room::lock_state(room_id).await;
    if let Err(e) = timeline::build_and_append_pdu(
        PduBuilder::timeline(&content),
        user_id,
        room_id,
        &crate::room::get_version(room_id)?,
        &state_lock,
    )
    .await
    {
        handle_response_error(e, room_id, user_id, &state_lock).await?;
    }
    Ok(())
}
// Posts a plain-text error notice into the room when appending the original
// admin response failed; reuses the caller's held state lock.
async fn handle_response_error(
    e: AppError,
    room_id: &RoomId,
    user_id: &UserId,
    state_lock: &RoomMutexGuard,
) -> AppResult<()> {
    error!("Failed to build and append admin room response PDU: \"{e}\"");
    let content = RoomMessageEventContent::text_plain(format!(
        "Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command \
        may have finished successfully, but we could not return the output."
    ));
    timeline::build_and_append_pdu(
        PduBuilder::timeline(&content),
        user_id,
        room_id,
        &crate::room::get_version(room_id)?,
        state_lock,
    )
    .await?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/server.rs | crates/server/src/admin/server.rs | mod cmd;
use cmd::*;
use std::path::PathBuf;
use clap::Subcommand;
use crate::AppResult;
use crate::macros::admin_command_dispatch;
/// Server administration subcommands, dispatched via `admin_command_dispatch`.
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum ServerCommand {
    // /// - Time elapsed since startup
    // Uptime,
    /// - Show configuration values
    ShowConfig,
    /// - Reload configuration values
    ReloadConfig { path: Option<PathBuf> },
    /// - List the features built into the server
    ListFeatures {
        #[arg(short, long)]
        available: bool,
        #[arg(short, long)]
        enabled: bool,
        #[arg(short, long)]
        comma: bool,
    },
    /// - Send a message to the admin room.
    AdminNotice { message: Vec<String> },
    /// - Hot-reload the server
    #[clap(alias = "reload")]
    ReloadMods,
    /// - Restart the server
    Restart {
        #[arg(short, long)]
        force: bool,
    },
    /// - Shutdown the server
    Shutdown,
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/console.rs | crates/server/src/admin/console.rs | use std::{
collections::VecDeque,
sync::{Arc, Mutex},
};
use futures_util::future::{AbortHandle, Abortable};
use rustyline_async::{Readline, ReadlineError, ReadlineEvent};
use termimad::MadSkin;
use tokio::task::JoinHandle;
use crate::core::events::room::message::RoomMessageEventContent;
use crate::defer;
use crate::logging::{self, is_systemd_mode};
/// Interactive admin console backed by `rustyline_async`.
pub struct Console {
    // Handle of the running console worker task, if any.
    worker_join: Mutex<Option<JoinHandle<()>>>,
    // Abort handle for an in-flight readline, if any.
    input_abort: Mutex<Option<AbortHandle>>,
    // Abort handle for an in-flight command, if any.
    command_abort: Mutex<Option<AbortHandle>>,
    // Most-recent-first command history, capped at HISTORY_LIMIT entries.
    history: Mutex<VecDeque<String>>,
    // Markdown skin used to render command output.
    output: MadSkin,
}
// Prompt shown at the start of every input line.
const PROMPT: &str = "palpo> ";
// Maximum number of history entries retained.
const HISTORY_LIMIT: usize = 48;
impl Console {
    /// Creates a console with empty task/abort slots and a dark output skin.
    pub(crate) fn new() -> Arc<Self> {
        Arc::new(Self {
            worker_join: None.into(),
            input_abort: None.into(),
            command_abort: None.into(),
            history: VecDeque::with_capacity(HISTORY_LIMIT).into(),
            output: configure_output(MadSkin::default_dark()),
        })
    }
    /// SIGINT aborts the running command (if any) and (re)starts the console.
    pub(crate) async fn handle_signal(self: &Arc<Self>, sig: &'static str) {
        if sig == "SIGINT" {
            self.interrupt_command();
            self.start().await;
        }
    }
    /// Spawns the console worker task unless one is already running.
    pub async fn start(self: &Arc<Self>) {
        let mut worker_join = self.worker_join.lock().expect("locked");
        if worker_join.is_none() {
            let self_ = Arc::clone(self);
            _ = worker_join.insert(tokio::spawn(self_.worker()));
        }
    }
    /// Interrupts everything and waits for the worker task to finish.
    pub async fn close(self: &Arc<Self>) {
        self.interrupt();
        let Some(worker_join) = self.worker_join.lock().expect("locked").take() else {
            return;
        };
        _ = worker_join.await;
    }
    /// Aborts command, readline and the worker task itself.
    pub fn interrupt(self: &Arc<Self>) {
        self.interrupt_command();
        self.interrupt_readline();
        self.worker_join
            .lock()
            .expect("locked")
            .as_ref()
            .map(JoinHandle::abort);
    }
    /// Aborts an in-flight readline, if any.
    pub fn interrupt_readline(self: &Arc<Self>) {
        if let Some(input_abort) = self.input_abort.lock().expect("locked").take() {
            debug!("Interrupting console readline...");
            input_abort.abort();
        }
    }
    /// Aborts an in-flight command, if any.
    pub fn interrupt_command(self: &Arc<Self>) {
        if let Some(command_abort) = self.command_abort.lock().expect("locked").take() {
            debug!("Interrupting console command...");
            command_abort.abort();
        }
    }
    // Main console loop: banner, then read/handle lines until EOF or I/O error.
    #[tracing::instrument(skip_all, name = "console", level = "trace")]
    async fn worker(self: Arc<Self>) {
        debug!("session starting");
        self.output.print_inline(&format!(
            "**palpo {}** admin console\n",
            crate::info::version()
        ));
        self.output
            .print_text("\"help\" for help, ^D to exit the console");
        loop {
            match self.readline().await {
                Ok(event) => match event {
                    ReadlineEvent::Line(string) => self.clone().handle(string).await,
                    ReadlineEvent::Interrupted => continue,
                    ReadlineEvent::Eof => break,
                    // ReadlineEvent::Quit => self.server.shutdown().unwrap_or_else(error::default_log),
                },
                Err(e) => match e {
                    ReadlineError::Closed => break,
                    ReadlineError::IO(e) => {
                        error!("console I/O: {e:?}");
                        break;
                    }
                },
            }
        }
        debug!("session ending");
        // Clear our own join handle so start() can spawn a fresh worker.
        self.worker_join.lock().expect("locked").take();
    }
    // Reads one line, registering an abort handle so interrupt_readline()
    // can cancel it; an aborted read is reported as Eof.
    async fn readline(self: &Arc<Self>) -> Result<ReadlineEvent, ReadlineError> {
        // Suppress normal log output while the prompt is active (unless systemd).
        let _suppression = (!is_systemd_mode()).then(logging::Suppress::new);
        let (mut readline, _writer) = Readline::new(PROMPT.to_owned())?;
        // let self_ = Arc::clone(self);
        // TODO: admin
        // readline.set_tab_completer(move |line| self_.tab_complete(line));
        self.set_history(&mut readline);
        let future = readline.readline();
        let (abort, abort_reg) = AbortHandle::new_pair();
        let future = Abortable::new(future, abort_reg);
        _ = self.input_abort.lock().expect("locked").insert(abort);
        defer! {{
            _ = self.input_abort.lock().expect("locked").take();
        }}
        let Ok(result) = future.await else {
            return Ok(ReadlineEvent::Eof);
        };
        readline.flush()?;
        result
    }
    // Records the line in history and runs it as an abortable command.
    async fn handle(self: Arc<Self>, line: String) {
        if line.trim().is_empty() {
            return;
        }
        self.add_history(line.clone());
        let future = self.clone().process(line);
        let (abort, abort_reg) = AbortHandle::new_pair();
        let future = Abortable::new(future, abort_reg);
        _ = self.command_abort.lock().expect("locked").insert(abort);
        defer! {{
            _ = self.command_abort.lock().expect("locked").take();
        }}
        _ = future.await;
    }
    // Executes the command via the executor and renders the result.
    // NOTE(review): Ok(None) (a command with no output) hits `unreachable!()`
    // and panics — verify that the processor never returns Ok(None) here.
    async fn process(self: Arc<Self>, line: String) {
        match crate::admin::executor().command_in_place(line, None).await {
            Ok(Some(ref content)) => self.output(content),
            Err(ref content) => self.output_err(content),
            _ => unreachable!(),
        }
    }
    // Renders error output with the red error skin.
    fn output_err(self: Arc<Self>, output_content: &RoomMessageEventContent) {
        let output = configure_output_err(self.output.clone());
        output.print_text(output_content.body());
    }
    // Renders normal output with the standard skin.
    fn output(self: Arc<Self>, output_content: &RoomMessageEventContent) {
        self.output.print_text(output_content.body());
    }
    // Seeds the readline history (stored newest-first, fed oldest-first).
    fn set_history(&self, readline: &mut Readline) {
        self.history
            .lock()
            .expect("locked")
            .iter()
            .rev()
            .for_each(|entry| {
                readline
                    .add_history_entry(entry.clone())
                    .expect("added history entry");
            });
    }
    // Prepends a line to history, dropping the oldest beyond HISTORY_LIMIT.
    fn add_history(&self, line: String) {
        let mut history = self.history.lock().expect("locked");
        history.push_front(line);
        history.truncate(HISTORY_LIMIT);
    }
    // Tab-completion hook; falls back to the unmodified line.
    fn tab_complete(&self, line: &str) -> String {
        crate::admin::executor()
            .complete_command(line)
            .unwrap_or_else(|| line.to_owned())
    }
}
/// Standalone/static markdown printer for errors.
pub fn print_err(markdown: &str) {
    configure_output_err(MadSkin::default_dark()).print_text(markdown);
}
/// Standalone/static markdown printer.
pub fn print(markdown: &str) {
    configure_output(MadSkin::default_dark()).print_text(markdown);
}
// Applies the error rendering style: red code on a dark background, for both
// inline code and code blocks.
fn configure_output_err(mut output: MadSkin) -> MadSkin {
    use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color};

    let style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234));
    output.code_block = LineStyle {
        left_margin: 0,
        right_margin: 0,
        align: Alignment::Left,
        compound_style: style.clone(),
    };
    output.inline_code = style;
    output
}
// Applies the normal rendering style: green code on a dark background plus a
// left-aligned, default-styled table layout.
fn configure_output(mut output: MadSkin) -> MadSkin {
    use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color};

    let style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234));
    output.code_block = LineStyle {
        left_margin: 0,
        right_margin: 0,
        align: Alignment::Left,
        compound_style: style.clone(),
    };
    output.inline_code = style;

    output.table = LineStyle {
        left_margin: 1,
        right_margin: 1,
        align: Alignment::Left,
        compound_style: CompoundStyle::default(),
    };
    output
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/room.rs | crates/server/src/admin/room.rs | mod alias;
mod directory;
mod info;
mod moderation;
use clap::Subcommand;
use self::{
alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand,
moderation::RoomModerationCommand,
};
use crate::admin::{Context, PAGE_SIZE, get_room_info};
use crate::core::OwnedRoomId;
use crate::macros::admin_command_dispatch;
use crate::{AppError, AppResult, data};
/// Room administration subcommands, dispatched via `admin_command_dispatch`.
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum RoomCommand {
    /// - List all rooms the server knows about
    #[clap(alias = "list")]
    ListRooms {
        page: Option<usize>,
        /// Excludes rooms that we have federation disabled with
        #[arg(long)]
        exclude_disabled: bool,
        /// Excludes rooms that we have banned
        #[arg(long)]
        exclude_banned: bool,
        #[arg(long)]
        /// Whether to only output room IDs without supplementary room
        /// information
        no_details: bool,
    },
    #[command(subcommand)]
    /// - View information about a room we know about
    Info(RoomInfoCommand),
    #[command(subcommand)]
    /// - Manage moderation of remote or local rooms
    Moderation(RoomModerationCommand),
    #[command(subcommand)]
    /// - Manage rooms' aliases
    Alias(RoomAliasCommand),
    #[command(subcommand)]
    /// - Manage the room directory
    Directory(RoomDirectoryCommand),
    /// - Check if we know about a room
    Exists { room_id: OwnedRoomId },
}
/// Lists known rooms, most-joined first, paginated by `PAGE_SIZE`.
///
/// `exclude_disabled` / `exclude_banned` filter out federation-disabled and
/// banned rooms; `no_details` prints only the room IDs.
pub(super) async fn list_rooms(
    ctx: &Context<'_>,
    page: Option<usize>,
    exclude_disabled: bool,
    exclude_banned: bool,
    no_details: bool,
) -> AppResult<()> {
    use std::cmp::Reverse;

    // TODO: i know there's a way to do this with clap, but i can't seem to find it
    let page = page.unwrap_or(1);
    let mut rooms = crate::room::all_room_ids()?
        .iter()
        .filter(|room_id| !exclude_disabled || !crate::room::is_disabled(room_id).unwrap_or(false))
        // On a lookup error the room is treated as banned (and excluded) —
        // conservative by design.
        .filter(|room_id| !exclude_banned || !data::room::is_banned(room_id).unwrap_or(true))
        .map(|room_id| get_room_info(room_id))
        .collect::<Vec<_>>();
    // Most-joined first. Replaces the former sort + reverse pair, which also
    // flipped the relative order of rooms with equal member counts.
    rooms.sort_unstable_by_key(|r| Reverse(r.joined_members));
    let rooms = rooms
        .into_iter()
        .skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE))
        .take(PAGE_SIZE)
        .collect::<Vec<_>>();
    if rooms.is_empty() {
        return Err(AppError::public("No more rooms."));
    }
    let body = rooms
        .iter()
        .map(|info| {
            if no_details {
                info.id.to_string()
            } else {
                format!(
                    "{}\tMembers: {}\tName: {}",
                    info.id, info.joined_members, info.name
                )
            }
        })
        .collect::<Vec<_>>()
        .join("\n");
    ctx.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len()))
        .await
}
/// Reports whether the server knows about `room_id` ("true"/"false").
pub(super) async fn exists(ctx: &Context<'_>, room_id: OwnedRoomId) -> AppResult<()> {
    let known = crate::room::room_exists(&room_id)?;
    ctx.write_str(&known.to_string()).await
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/room/info.rs | crates/server/src/admin/room/info.rs | use clap::Subcommand;
use crate::admin::{Context, admin_command_dispatch};
use crate::core::OwnedRoomId;
use crate::{AppError, AppResult};
use crate::{IsRemoteOrLocal, data};
/// Read-only room inspection subcommands.
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum RoomInfoCommand {
    /// - List joined members in a room
    ListJoinedMembers {
        room_id: OwnedRoomId,
        /// Lists only our local users in the specified room
        #[arg(long)]
        local_only: bool,
    },
    /// - Displays room topic
    ///
    /// Room topics can be huge, so this is in its
    /// own separate command
    ViewRoomTopic { room_id: OwnedRoomId },
}
/// Lists the joined members of `room_id`, optionally restricted to local
/// users, as "mxid | displayname" lines.
async fn list_joined_members(
    ctx: &Context<'_>,
    room_id: OwnedRoomId,
    local_only: bool,
) -> AppResult<()> {
    // Fall back to the room ID when the room has no resolvable name.
    let room_name = crate::room::get_name(&room_id).unwrap_or_else(|_| room_id.to_string());

    let mut member_info = Vec::new();
    for user_id in crate::room::joined_users(&room_id, None)? {
        if local_only && !user_id.is_local() {
            continue;
        }
        // Fall back to the MXID when no display name is set or the lookup fails.
        let displayname = data::user::display_name(&user_id)
            .ok()
            .flatten()
            .unwrap_or_else(|| user_id.to_string());
        member_info.push((displayname, user_id));
    }

    let num = member_info.len();
    let body = member_info
        .into_iter()
        .map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
        .collect::<Vec<_>>()
        .join("\n");
    ctx.write_str(&format!(
        "{num} Members in Room \"{room_name}\":\n```\n{body}\n```",
    ))
    .await
}
/// Displays the topic of `room_id`, or an error when none is set.
async fn view_room_topic(ctx: &Context<'_>, room_id: OwnedRoomId) -> AppResult<()> {
    match crate::room::get_topic(&room_id) {
        Ok(room_topic) => {
            ctx.write_str(&format!("Room topic:\n```\n{room_topic}\n```"))
                .await
        }
        Err(_) => Err(AppError::public("Room does not have a room topic set.")),
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/room/alias.rs | crates/server/src/admin/room/alias.rs | use std::fmt::Write;
use clap::Subcommand;
use crate::admin::Context;
use crate::core::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId};
use crate::{AppError, AppResult, config};
#[derive(Debug, Subcommand)]
pub(crate) enum RoomAliasCommand {
/// - Make an alias point to a room.
Set {
#[arg(short, long)]
/// Set the alias even if a room is already using it
force: bool,
/// The room id to set the alias on
room_id: OwnedRoomId,
/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - Remove a local alias
Remove {
/// The alias localpart to remove (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - Show which room is using an alias
Which {
/// The alias localpart to look up (`alias`, not
/// `#alias:servername.tld`)
room_alias_localpart: String,
},
/// - List aliases currently being used
List {
/// If set, only list the aliases for this room
room_id: Option<OwnedRoomId>,
},
}
pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> AppResult<()> {
let server_user = config::server_user();
match command {
RoomAliasCommand::Set {
ref room_alias_localpart,
..
}
| RoomAliasCommand::Remove {
ref room_alias_localpart,
}
| RoomAliasCommand::Which {
ref room_alias_localpart,
} => {
let room_alias_str = format!("#{}:{}", room_alias_localpart, config::server_name());
let room_alias = match RoomAliasId::parse(room_alias_str) {
Ok(alias) => alias,
Err(err) => {
return Err(AppError::public(format!("Failed to parse alias: {err}")));
}
};
match command {
RoomAliasCommand::Set { force, room_id, .. } => {
match (force, crate::room::resolve_local_alias(&room_alias)) {
(true, Ok(id)) => {
match crate::room::set_alias(&room_id, &room_alias, &server_user.id) {
Err(err) => {
Err(AppError::public(format!("Failed to remove alias: {err}")))
}
Ok(()) => {
context
.write_str(&format!(
"Successfully overwrote alias (formerly {id})"
))
.await
}
}
}
(false, Ok(id)) => Err(AppError::public(format!(
"Refusing to overwrite in use alias for {id}, use -f or --force to \
overwrite"
))),
(_, Err(_)) => {
match crate::room::set_alias(&room_id, &room_alias, &server_user.id) {
Err(err) => {
Err(AppError::public(format!("Failed to remove alias: {err}")))
}
Ok(()) => context.write_str("Successfully set alias").await,
}
}
}
}
RoomAliasCommand::Remove { .. } => {
match crate::room::resolve_local_alias(&room_alias) {
Err(_) => Err(AppError::public("Alias isn't in use.")),
Ok(id) => {
match crate::room::remove_alias(&room_alias, &server_user).await {
Err(e) => {
Err(AppError::public(format!("Failed to remove alias: {e}")))
}
Ok(()) => {
context.write_str(&format!("Removed alias from {id}")).await
}
}
}
}
}
RoomAliasCommand::Which { .. } => {
match crate::room::resolve_local_alias(&room_alias) {
Err(_) => Err(AppError::public("Alias isn't in use.")),
Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await,
}
}
RoomAliasCommand::List { .. } => unreachable!(),
}
}
RoomAliasCommand::List { room_id } => {
if let Some(room_id) = room_id {
let aliases: Vec<OwnedRoomAliasId> = crate::room::local_aliases_for_room(&room_id)?;
let plain_list = aliases.iter().fold(String::new(), |mut output, alias| {
writeln!(output, "- {alias}")
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases for {room_id}:\n{plain_list}");
context.write_str(&plain).await
} else {
let aliases = crate::room::all_local_aliases()?;
let server_name = config::server_name();
let plain_list = aliases
.iter()
.fold(String::new(), |mut output, (alias, id)| {
writeln!(output, "- `{alias}` -> #{id}:{server_name}")
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases:\n{plain_list}");
context.write_str(&plain).await
}
}
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/room/moderation.rs | crates/server/src/admin/room/moderation.rs | use clap::Subcommand;
use crate::admin::{Context, get_room_info};
use crate::core::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
use crate::macros::admin_command_dispatch;
use crate::{AppError, AppResult, IsRemoteOrLocal, config, membership};
#[admin_command_dispatch]
#[derive(Debug, Subcommand)]
pub(crate) enum RoomModerationCommand {
/// - Bans a room from local users joining and evicts all our local users
/// (including server
/// admins)
/// from the room. Also blocks any invites (local and remote) for the
/// banned room, and disables federation entirely with it.
BanRoom {
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: OwnedRoomOrAliasId,
},
/// - Bans a list of rooms (room IDs and room aliases) from a newline
/// delimited codeblock similar to `user deactivate-all`. Applies the same
/// steps as ban-room
BanListOfRooms,
/// - Unbans a room to allow local users to join again
UnbanRoom {
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: OwnedRoomOrAliasId,
},
/// - List of all rooms we have banned
ListBannedRooms {
#[arg(long)]
/// Whether to only output room IDs without supplementary room
/// information
no_details: bool,
},
}
async fn ban_room(ctx: &Context<'_>, room: OwnedRoomOrAliasId) -> AppResult<()> {
debug!("Got room alias or ID: {}", room);
let admin_room_alias = config::admin_alias();
if let Ok(admin_room_id) = crate::room::get_admin_room()
&& (room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias))
{
return Err(AppError::public("Not allowed to ban the admin room."));
}
let room_id = if room.is_room_id() {
let room_id = match RoomId::parse(&room) {
Ok(room_id) => room_id,
Err(e) => {
return Err(AppError::public(format!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
}
};
debug!("Room specified is a room ID, banning room ID");
crate::room::ban_room(&room_id, true)?;
room_id.to_owned()
} else if room.is_room_alias_id() {
let room_alias = match RoomAliasId::parse(&room) {
Ok(room_alias) => room_alias,
Err(e) => {
return Err(AppError::public(format!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
}
};
debug!(
"Room specified is not a room ID, attempting to resolve room alias to a room ID \
locally, if not using get_alias_helper to fetch room ID remotely"
);
let room_id = match crate::room::alias::resolve_local_alias(&room_alias) {
Ok(room_id) => room_id,
_ => {
debug!(
"We don't have this room alias to a room ID locally, attempting to fetch \
room ID over federation"
);
match crate::room::alias::resolve_alias(&room_alias, None).await {
Ok((room_id, servers)) => {
debug!(
?room_id,
?servers,
"Got federation response fetching room ID for {room_id}"
);
room_id
}
Err(e) => {
return Err(AppError::public(format!(
"Failed to resolve room alias {room_alias} to a room ID: {e}"
)));
}
}
}
};
crate::room::ban_room(&room_id, true)?;
room_id
} else {
return Err(AppError::public(
"Room specified is not a room ID or room alias. Please note that this requires a \
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`)",
));
};
// TODO: this should be done
debug!("Making all users leave the room {room_id} and forgetting it");
// let mut users = self
// .services
// .rooms
// .state_cache
// .room_members(&room_id)
// .map(ToOwned::to_owned)
// .ready_filter(|user| user.is_local())
// .boxed();
// while let Some(ref user_id) = users.next().await {
// debug!(
// "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \
// evicting admins too)",
// );
// if let Err(e) = membership::leave_room(user_id, &room_id, None).boxed().await {
// warn!("Failed to leave room: {e}");
// }
// crate::membership::forget_room(user_id, &room_id)?;
// }
// self.services
// .rooms
// .alias
// .local_aliases_for_room(&room_id)
// .map(ToOwned::to_owned)
// .for_each(async |local_alias| {
// if let Ok(server_user) = crate::data::user::get_user(config::server_user()) {
// crate::room::remove_alias(&local_alias, &server_user).await.ok();
// }
// })
// .await;
// unpublish from room directory
crate::room::directory::set_public(&room_id, false)?;
crate::room::disable_room(&room_id, true)?;
ctx.write_str(
"Room banned, removed all our local users, and disabled incoming federation with room.",
)
.await
}
async fn ban_list_of_rooms(ctx: &Context<'_>) -> AppResult<()> {
if ctx.body.len() < 2
|| !ctx.body[0].trim().starts_with("```")
|| ctx.body.last().unwrap_or(&"").trim() != "```"
{
return Err(AppError::public(
"Expected code block in command body. Add --help for details.",
));
}
let rooms_s = ctx
.body
.to_vec()
.drain(1..ctx.body.len().saturating_sub(1))
.collect::<Vec<_>>();
let admin_room_alias = config::admin_alias();
let mut room_ban_count: usize = 0;
let mut room_ids: Vec<OwnedRoomId> = Vec::new();
for &room in &rooms_s {
match <&RoomOrAliasId>::try_from(room) {
Ok(room_alias_or_id) => {
if let Ok(admin_room_id) = crate::room::get_admin_room()
&& (room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias))
{
warn!("User specified admin room in bulk ban list, ignoring");
continue;
}
if room_alias_or_id.is_room_id() {
let room_id = match RoomId::parse(room_alias_or_id) {
Ok(room_id) => room_id,
Err(e) => {
// ignore rooms we failed to parse
warn!(
"Error parsing room \"{room}\" during bulk room banning, \
ignoring error and logging here: {e}"
);
continue;
}
};
room_ids.push(room_id.to_owned());
}
if room_alias_or_id.is_room_alias_id() {
match RoomAliasId::parse(room_alias_or_id) {
Ok(room_alias) => {
let room_id = match crate::room::alias::resolve_local_alias(&room_alias)
{
Ok(room_id) => room_id,
_ => {
debug!(
"We don't have this room alias to a room ID locally, \
attempting to fetch room ID over federation"
);
match crate::room::alias::resolve_alias(&room_alias, None).await
{
Ok((room_id, servers)) => {
debug!(
?room_id,
?servers,
"Got federation response fetching room ID for \
{room}",
);
room_id
}
Err(e) => {
warn!(
"Failed to resolve room alias {room} to a room \
ID: {e}"
);
continue;
}
}
}
};
room_ids.push(room_id);
}
Err(e) => {
warn!(
"Error parsing room \"{room}\" during bulk room banning, \
ignoring error and logging here: {e}"
);
continue;
}
}
}
}
Err(e) => {
warn!(
"Error parsing room \"{room}\" during bulk room banning, ignoring error and \
logging here: {e}"
);
continue;
}
}
}
for room_id in room_ids {
crate::room::ban_room(&room_id, true)?;
debug!("Banned {room_id} successfully");
room_ban_count = room_ban_count.saturating_add(1);
debug!("Making all users leave the room {room_id} and forgetting it");
let users = crate::room::joined_users(&room_id, None)?
.into_iter()
.filter(|user| user.is_local())
.collect::<Vec<_>>();
for user_id in &users {
debug!(
"Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \
evicting admins too)",
);
if let Err(e) = membership::leave_room(user_id, &room_id, None).await {
warn!("Failed to leave room: {e}");
}
membership::forget_room(user_id, &room_id)?;
}
// TODO: admin
// // remove any local aliases, ignore errors
// self.services
// .rooms
// .alias
// .local_aliases_for_room(&room_id)
// .map(ToOwned::to_owned)
// .for_each(async |local_alias| {
// if let Ok(server_user) = crate::data::user::get_user(config::server_user()) {
// crate::room::remove_alias(&local_alias, &server_user).await.ok();
// }
// })
// .await;
// unpublish from room directory, ignore errors
crate::room::directory::set_public(&room_id, false)?;
crate::room::disable_room(&room_id, true)?;
}
ctx.write_str(&format!(
"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \
disabled incoming federation with the room."
))
.await
}
async fn unban_room(ctx: &Context<'_>, room: OwnedRoomOrAliasId) -> AppResult<()> {
let room_id = if room.is_room_id() {
let room_id = match RoomId::parse(&room) {
Ok(room_id) => room_id,
Err(e) => {
return Err(AppError::public(format!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
}
};
debug!("Room specified is a room ID, unbanning room ID");
crate::room::ban_room(&room_id, false)?;
room_id.to_owned()
} else if room.is_room_alias_id() {
let room_alias = match RoomAliasId::parse(&room) {
Ok(room_alias) => room_alias,
Err(e) => {
return Err(AppError::public(format!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
}
};
debug!(
"Room specified is not a room ID, attempting to resolve room alias to a room ID \
locally, if not using get_alias_helper to fetch room ID remotely"
);
let room_id = match crate::room::alias::resolve_local_alias(&room_alias) {
Ok(room_id) => room_id,
_ => {
debug!(
"We don't have this room alias to a room ID locally, attempting to fetch \
room ID over federation"
);
match crate::room::alias::resolve_alias(&room_alias, None).await {
Ok((room_id, servers)) => {
debug!(
?room_id,
?servers,
"Got federation response fetching room ID for room {room}"
);
room_id
}
Err(e) => {
return Err(AppError::public(format!(
"Failed to resolve room alias {room} to a room ID: {e}"
)));
}
}
}
};
crate::room::ban_room(&room_id, false)?;
room_id
} else {
return Err(AppError::public(
"Room specified is not a room ID or room alias. Please note that this requires a \
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`)",
));
};
crate::room::disable_room(&room_id, false)?;
ctx.write_str("Room unbanned and federation re-enabled.")
.await
}
async fn list_banned_rooms(ctx: &Context<'_>, no_details: bool) -> AppResult<()> {
let room_ids: Vec<OwnedRoomId> = crate::room::list_banned_rooms()?;
if room_ids.is_empty() {
return Err(AppError::public("No rooms are banned."));
}
let mut rooms = room_ids
.iter()
.map(|room_id| get_room_info(room_id))
.collect::<Vec<_>>();
rooms.sort_by_key(|r| r.joined_members);
rooms.reverse();
let num = rooms.len();
let body = rooms
.iter()
.map(|info| {
if no_details {
format!("{}", info.id)
} else {
format!(
"{}\tMembers: {}\tName: {}",
info.id, info.joined_members, info.name
)
}
})
.collect::<Vec<_>>()
.join("\n");
ctx.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
.await
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/room/directory.rs | crates/server/src/admin/room/directory.rs | use clap::Subcommand;
use crate::admin::{Context, PAGE_SIZE, get_room_info};
use crate::core::OwnedRoomId;
use crate::{AppError, AppResult};
#[derive(Debug, Subcommand)]
pub(crate) enum RoomDirectoryCommand {
/// - Publish a room to the room directory
Publish {
/// The room id of the room to publish
room_id: OwnedRoomId,
},
/// - Unpublish a room to the room directory
Unpublish {
/// The room id of the room to unpublish
room_id: OwnedRoomId,
},
/// - List rooms that are published
List { page: Option<usize> },
}
pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> AppResult<()> {
match command {
RoomDirectoryCommand::Publish { room_id } => {
crate::room::directory::set_public(&room_id, true)?;
context.write_str("Room published").await
}
RoomDirectoryCommand::Unpublish { room_id } => {
crate::room::directory::set_public(&room_id, false)?;
context.write_str("Room unpublished").await
}
RoomDirectoryCommand::List { page } => {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
let page = page.unwrap_or(1);
let mut rooms: Vec<_> = crate::room::public_room_ids()?
.into_iter()
.map(|room_id| get_room_info(&room_id))
.collect();
rooms.sort_by_key(|r| r.joined_members);
rooms.reverse();
let rooms: Vec<_> = rooms
.into_iter()
.skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE))
.take(PAGE_SIZE)
.collect();
if rooms.is_empty() {
return Err(AppError::public("No more rooms."));
}
let body = rooms
.iter()
.map(|info| {
format!(
"{} | Members: {} | Name: {}",
info.id, info.joined_members, info.name
)
})
.collect::<Vec<_>>()
.join("\n");
context
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
.await
}
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/user/cmd.rs | crates/server/src/admin/user/cmd.rs | use crate::admin::{Context, get_room_info, parse_active_local_user_id, parse_local_user_id};
use crate::core::{
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId,
events::{
RoomAccountDataEventType, StateEventType,
room::{power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent},
tag::{TagEventContent, TagInfo},
},
};
use crate::room::timeline;
use crate::user::full_user_deactivate;
use crate::{
AppError, AppResult, IsRemoteOrLocal, OptionalExtension, PduBuilder, config, data, membership,
utils,
};
const AUTO_GEN_PASSWORD_LENGTH: usize = 25;
const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin.";
pub(super) async fn list_users(ctx: &Context<'_>) -> AppResult<()> {
let users: Vec<_> = crate::user::list_local_users()?;
let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len());
plain_msg += users
.iter()
.map(|u| u.to_string())
.collect::<Vec<_>>()
.join("\n")
.as_str();
plain_msg += "\n```";
ctx.write_str(&plain_msg).await
}
pub(super) async fn create_user(
ctx: &Context<'_>,
username: String,
password: Option<String>,
) -> AppResult<()> {
// Validate user id
let user_id = parse_local_user_id(&username)?;
let conf = config::get();
if let Err(e) = user_id.validate_strict()
&& conf.emergency_password.is_none()
{
return Err(AppError::public(format!(
"Username {user_id} contains disallowed characters or spaces: {e}"
)));
}
if data::user::user_exists(&user_id)? {
return Err(AppError::public(format!("User {user_id} already exists")));
}
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
// Create user
crate::user::create_user(&user_id, Some(password.as_str()))?;
// Default to pretty displayname
let display_name = user_id.localpart().to_owned();
// If `new_user_displayname_suffix` is set, registration will push whatever
// content is set to the user's display name with a space before it
// if !conf.new_user_displayname_suffix.is_empty() {
// write!(
// displayname,
// " {}",
// conf.new_user_displayname_suffix
// )?;
// }
crate::data::user::set_display_name(&user_id, &display_name)?;
// Initial account data
crate::user::set_data(
&user_id,
None,
&crate::core::events::GlobalAccountDataEventType::PushRules.to_string(),
serde_json::to_value(crate::core::events::push_rules::PushRulesEvent {
content: crate::core::events::push_rules::PushRulesEventContent {
global: crate::core::push::Ruleset::server_default(&user_id),
},
})?,
)?;
if !conf.auto_join_rooms.is_empty() {
for room in &conf.auto_join_rooms {
let Ok(room_id) = crate::room::alias::resolve(room).await else {
error!(
%user_id,
"Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"
);
continue;
};
if !crate::room::is_server_joined(config::server_name(), &room_id)? {
warn!("Skipping room {room} to automatically join as we have never joined before.");
continue;
}
if let Ok(room_server_name) = room.server_name() {
let user = data::user::get_user(&user_id)?;
match membership::join_room(
&user,
None,
&room_id,
Some("Automatically joining this room upon registration".to_owned()),
&[
config::server_name().to_owned(),
room_server_name.to_owned(),
],
None,
None,
Default::default(),
)
.await
{
Ok(_response) => {
info!("Automatically joined room {room} for user {user_id}");
}
Err(e) => {
// don't return this error so we don't fail registrations
error!("Failed to automatically join room {room} for user {user_id}: {e}");
crate::admin::send_text(&format!(
"Failed to automatically join room {room} for user {user_id}: \
{e}"
))
.await?;
}
}
}
}
}
// we dont add a device since we're not the user, just the creator
// if this account creation is from the CLI / --execute, invite the first user
// to admin room
if let Ok(admin_room) = crate::room::get_admin_room() {
if crate::room::joined_member_count(&admin_room).is_ok_and(|c| c == 1) {
crate::user::make_user_admin(&user_id)?;
warn!("Granting {user_id} admin privileges as the first user");
}
} else {
debug!("create_user admin command called without an admin room being available");
}
ctx.write_str(&format!(
"Created user with user_id: {user_id} and password: `{password}`"
))
.await
}
pub(super) async fn deactivate(
ctx: &Context<'_>,
no_leave_rooms: bool,
user_id: String,
) -> AppResult<()> {
// Validate user id
let user_id = parse_local_user_id(&user_id)?;
// don't deactivate the server service account
if user_id == config::server_user_id() {
return Err(AppError::public(
"Not allowed to deactivate the server service account.",
));
}
// TODO: admin
// crate::user::deactivate_account(&user_id).await?;
if !no_leave_rooms {
crate::admin::send_text(&format!(
"Making {user_id} leave all rooms after deactivation..."
))
.await?;
let all_joined_rooms: Vec<OwnedRoomId> = data::user::joined_rooms(&user_id)?;
full_user_deactivate(&user_id, &all_joined_rooms).await?;
membership::leave_all_rooms(&user_id).await?;
}
ctx.write_str(&format!("User {user_id} has been deactivated"))
.await
}
pub(super) async fn reset_password(
ctx: &Context<'_>,
username: String,
password: Option<String>,
) -> AppResult<()> {
let user_id = parse_local_user_id(&username)?;
if user_id == config::server_user_id() {
return Err(AppError::public(
"Not allowed to set the password for the server account. Please use the emergency password config option.",
));
}
let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
match crate::user::set_password(&user_id, new_password.as_str()) {
Err(e) => {
return Err(AppError::public(format!(
"Couldn't reset the password for user {user_id}: {e}"
)));
}
Ok(()) => write!(
ctx,
"Successfully reset the password for user {user_id}: `{new_password}`"
),
}
.await
}
pub(super) async fn deactivate_all(
ctx: &Context<'_>,
no_leave_rooms: bool,
force: bool,
) -> AppResult<()> {
if ctx.body.len() < 2
|| !ctx.body[0].trim().starts_with("```")
|| ctx.body.last().unwrap_or(&"").trim() != "```"
{
return Err(AppError::public(
"Expected code block in command body. Add --help for details.",
));
}
let usernames = ctx
.body
.to_vec()
.drain(1..ctx.body.len().saturating_sub(1))
.collect::<Vec<_>>();
let mut user_ids: Vec<OwnedUserId> = Vec::with_capacity(usernames.len());
let mut admins = Vec::new();
for username in usernames {
match parse_active_local_user_id(username).await {
Err(e) => {
crate::admin::send_text(&format!(
"{username} is not a valid username, skipping over: {e}"
))
.await?;
continue;
}
Ok(user_id) => {
if crate::data::user::is_admin(&user_id)? && !force {
crate::admin::send_text(&format!(
"{username} is an admin and --force is not set, skipping over"
))
.await?;
admins.push(username);
continue;
}
// don't deactivate the server service account
if user_id == config::server_user_id() {
crate::admin::send_text(&format!(
"{username} is the server service account, skipping over"
))
.await?;
continue;
}
user_ids.push(user_id.to_owned());
}
}
}
let mut deactivation_count: usize = 0;
for user_id in user_ids {
match crate::user::deactivate_account(&user_id).await {
Err(e) => {
let _ = crate::admin::send_text(&format!("failed deactivating user: {e}")).await;
}
Ok(()) => {
deactivation_count = deactivation_count.saturating_add(1);
if !no_leave_rooms {
info!("Forcing user {user_id} to leave all rooms apart of deactivate-all");
let all_joined_rooms = data::user::joined_rooms(&user_id)?;
full_user_deactivate(&user_id, &all_joined_rooms).await?;
membership::leave_all_rooms(&user_id).await?;
}
}
}
}
if admins.is_empty() {
write!(ctx, "Deactivated {deactivation_count} accounts.")
} else {
write!(
ctx,
"Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \
--force to deactivate admin accounts",
admins.join(", ")
)
}
.await
}
pub(super) async fn list_joined_rooms(ctx: &Context<'_>, user_id: String) -> AppResult<()> {
// Validate user id
let user_id = parse_local_user_id(&user_id)?;
let mut rooms: Vec<_> = data::user::joined_rooms(&user_id)?
.iter()
.map(|room_id| get_room_info(room_id))
.collect();
if rooms.is_empty() {
return Err(AppError::public("User is not in any rooms."));
}
rooms.sort_by_key(|r| r.joined_members);
rooms.reverse();
let body = rooms
.iter()
.map(|info| {
format!(
"{}\tMembers: {}\tName: {}",
info.id, info.joined_members, info.name
)
})
.collect::<Vec<_>>()
.join("\n");
ctx.write_str(&format!(
"Rooms {user_id} Joined ({}):\n```\n{body}\n```",
rooms.len(),
))
.await
}
pub(super) async fn force_join_list_of_local_users(
ctx: &Context<'_>,
room_id: OwnedRoomOrAliasId,
yes_i_want_to_do_this: bool,
) -> AppResult<()> {
if ctx.body.len() < 2
|| !ctx.body[0].trim().starts_with("```")
|| ctx.body.last().unwrap_or(&"").trim() != "```"
{
return Err(AppError::public(
"Expected code block in command body. Add --help for details.",
));
}
if !yes_i_want_to_do_this {
return Err(AppError::public(
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
bulk join all specified local users.",
));
}
let Ok(admin_room) = crate::room::get_admin_room() else {
return Err(AppError::public(
"There is not an admin room to check for server admins.",
));
};
let (room_id, servers) = crate::room::alias::resolve_with_servers(&room_id, None).await?;
if !crate::room::is_server_joined(config::server_name(), &room_id)? {
return Err(AppError::public("We are not joined in this room."));
}
let server_admins: Vec<_> = crate::room::active_local_users_in_room(&admin_room)?;
if !crate::room::joined_users(&room_id, None)?
.iter()
.any(|user_id| server_admins.contains(&user_id.to_owned()))
{
return Err(AppError::public(
"There is not a single server admin in the room.",
));
}
let usernames = ctx
.body
.to_vec()
.drain(1..ctx.body.len().saturating_sub(1))
.collect::<Vec<_>>();
let mut user_ids: Vec<OwnedUserId> = Vec::with_capacity(usernames.len());
for username in usernames {
match parse_active_local_user_id(username).await {
Ok(user_id) => {
// don't make the server service account join
if user_id == config::server_user_id() {
crate::admin::send_text(&format!(
"{username} is the server service account, skipping over"
))
.await?;
continue;
}
user_ids.push(user_id);
}
Err(e) => {
crate::admin::send_text(&format!(
"{username} is not a valid username, skipping over: {e}"
))
.await?;
continue;
}
}
}
let mut failed_joins: usize = 0;
let mut successful_joins: usize = 0;
for user_id in user_ids {
let user = data::user::get_user(&user_id)?;
match membership::join_room(
&user,
None,
&room_id,
Some(String::from(BULK_JOIN_REASON)),
&servers,
None,
None,
Default::default(),
)
.await
{
Ok(_res) => {
successful_joins = successful_joins.saturating_add(1);
}
Err(e) => {
warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}");
failed_joins = failed_joins.saturating_add(1);
}
}
}
ctx.write_str(&format!(
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
failed.",
))
.await
}
pub(super) async fn force_join_all_local_users(
ctx: &Context<'_>,
room_id: OwnedRoomOrAliasId,
yes_i_want_to_do_this: bool,
) -> AppResult<()> {
if !yes_i_want_to_do_this {
return Err(AppError::public(
"you must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
bulk join all local users.",
));
}
let Ok(admin_room) = crate::room::get_admin_room() else {
return Err(AppError::public(
"There is not an admin room to check for server admins.",
));
};
let (room_id, servers) = crate::room::alias::resolve_with_servers(&room_id, None).await?;
if !crate::room::is_server_joined(config::server_name(), &room_id)? {
return Err(AppError::public("we are not joined in this room"));
}
let server_admins: Vec<_> = crate::room::active_local_users_in_room(&admin_room)?;
if !crate::room::joined_users(&room_id, None)?
.iter()
.any(|user_id| server_admins.contains(&user_id.to_owned()))
{
return Err(AppError::public(
"there is not a single server admin in the room.",
));
}
let mut failed_joins: usize = 0;
let mut successful_joins: usize = 0;
for user_id in &data::user::list_local_users()? {
let user = data::user::get_user(user_id)?;
match membership::join_room(
&user,
None,
&room_id,
Some(String::from(BULK_JOIN_REASON)),
&servers,
None,
None,
Default::default(),
)
.await
{
Ok(_res) => {
successful_joins = successful_joins.saturating_add(1);
}
Err(e) => {
warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}");
failed_joins = failed_joins.saturating_add(1);
}
}
}
ctx.write_str(&format!(
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
failed.",
))
.await
}
pub(super) async fn force_join_room(
ctx: &Context<'_>,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> AppResult<()> {
let user_id = parse_local_user_id(&user_id)?;
let (room_id, servers) = crate::room::alias::resolve_with_servers(&room_id, None).await?;
assert!(user_id.is_local(), "parsed user_id must be a local user");
let user = data::user::get_user(&user_id)?;
membership::join_room(
&user,
None,
&room_id,
None,
&servers,
None,
None,
Default::default(),
)
.await?;
ctx.write_str(&format!("{user_id} has been joined to {room_id}.",))
.await
}
pub(super) async fn force_leave_room(
ctx: &Context<'_>,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> AppResult<()> {
let user_id = parse_local_user_id(&user_id)?;
let room_id = crate::room::alias::resolve(&room_id).await?;
assert!(user_id.is_local(), "parsed user_id must be a local user");
if !crate::room::user::is_joined(&user_id, &room_id)? {
return Err(AppError::public("{user_id} is not joined in the room"));
}
membership::leave_room(&user_id, &room_id, None).await?;
ctx.write_str(&format!("{user_id} has left {room_id}.",))
.await
}
pub(super) async fn force_demote(
ctx: &Context<'_>,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> AppResult<()> {
let user_id = parse_local_user_id(&user_id)?;
let room_id = crate::room::alias::resolve(&room_id).await?;
let room_version = crate::room::get_version(&room_id)?;
let version_rule = crate::room::get_version_rules(&room_version)?;
assert!(user_id.is_local(), "parsed user_id must be a local user");
let state_lock = crate::room::lock_state(&room_id).await;
let room_power_levels = crate::room::get_power_levels(&room_id).await.ok();
let user_can_demote_self =
room_power_levels.as_ref().is_some_and(|power_levels| {
power_levels.user_can_change_user_power_level(&user_id, &user_id)
}) || crate::room::get_state(&room_id, &StateEventType::RoomCreate, "", None)
.is_ok_and(|event| event.sender == user_id);
if !user_can_demote_self {
return Err(AppError::public(
"User is not allowed to modify their own power levels in the room.",
));
}
let mut power_levels_content: RoomPowerLevelsEventContent = room_power_levels
.map(TryInto::try_into)
.transpose()?
.unwrap_or_else(|| RoomPowerLevelsEventContent::new(&version_rule.authorization));
power_levels_content.users.remove(&user_id);
let event = timeline::build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
&user_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
ctx.write_str(&format!(
"User {user_id} demoted themselves to the room default power level in {room_id} - \
{}",
event.event_id
))
.await
}
/// Grants server admin privileges to the given local user.
pub(super) async fn make_user_admin(ctx: &Context<'_>, user_id: String) -> AppResult<()> {
    let user_id = parse_local_user_id(&user_id)?;
    assert!(user_id.is_local(), "Parsed user_id must be a local user");
    crate::user::make_user_admin(&user_id)?;
    let reply = format!("{user_id} has been granted admin privileges.");
    ctx.write_str(&reply).await
}
/// Adds `tag` to the user's `m.tag` room account data for `room_id`.
pub(super) async fn put_room_tag(
    ctx: &Context<'_>,
    user_id: String,
    room_id: OwnedRoomId,
    tag: String,
) -> AppResult<()> {
    let user_id = parse_active_local_user_id(&user_id).await?;
    // Read the existing tag content (or start from empty) so other tags in
    // the same account-data event are preserved.
    let mut tags_event_content = data::user::get_data::<TagEventContent>(
        &user_id,
        Some(&room_id),
        &RoomAccountDataEventType::Tag.to_string(),
    )
    .unwrap_or_default();
    tags_event_content
        .tags
        .insert(tag.clone().into(), TagInfo::new());
    crate::user::set_data(
        &user_id,
        Some(room_id.clone()),
        &RoomAccountDataEventType::Tag.to_string(),
        serde_json::to_value(tags_event_content).expect("to json value always works"),
    )?;
    ctx.write_str(&format!(
        "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}"
    ))
    .await
}
/// Removes `tag` from the user's `m.tag` room account data for `room_id`.
pub(super) async fn delete_room_tag(
    ctx: &Context<'_>,
    user_id: String,
    room_id: OwnedRoomId,
    tag: String,
) -> AppResult<()> {
    let user_id = parse_active_local_user_id(&user_id).await?;
    let event_type = RoomAccountDataEventType::Tag.to_string();
    // Load the current tags (empty content if none exist yet).
    let mut tags_event_content =
        data::user::get_data::<TagEventContent>(&user_id, Some(&room_id), &event_type)
            .unwrap_or_default();
    tags_event_content.tags.remove(&tag.clone().into());
    // Persist the updated account data event.
    crate::user::set_data(
        &user_id,
        Some(room_id.clone()),
        &event_type,
        serde_json::to_value(tags_event_content).expect("to json value always works"),
    )?;
    let reply = format!(
        "Successfully updated room account data for {user_id} and room {room_id}, deleting room \
         tag {tag}"
    );
    ctx.write_str(&reply).await
}
/// Prints the user's `m.tag` room account data for `room_id` as a code block.
pub(super) async fn get_room_tags(
    ctx: &Context<'_>,
    user_id: String,
    room_id: OwnedRoomId,
) -> AppResult<()> {
    let user_id = parse_active_local_user_id(&user_id).await?;
    let event_type = RoomAccountDataEventType::Tag.to_string();
    let tags_event_content =
        data::user::get_data::<TagEventContent>(&user_id, Some(&room_id), &event_type)
            .unwrap_or_default();
    ctx.write_str(&format!("```\n{:#?}\n```", tags_event_content.tags))
        .await
}
/// Redacts a locally-sent event on behalf of the server administrators.
///
/// The redaction PDU is issued *as the original sender*, with a reason
/// naming this server's admins.
pub(super) async fn redact_event(ctx: &Context<'_>, event_id: OwnedEventId) -> AppResult<()> {
    let Ok(Some(event)) = timeline::get_non_outlier_pdu(&event_id) else {
        return Err(AppError::public("event does not exist in our database"));
    };
    // TODO: check if the user has permission to redact this event
    // if event.is_redacted() {
    //     return Err(AppError::public("event is already redacted"));
    // }
    // Only events from local users can be redacted this way, since the
    // redaction is signed as that sender.
    if !event.sender.is_local() {
        return Err(AppError::public("this command only works on local users"));
    }
    let reason = format!(
        "the administrator(s) of {} has redacted this user's message",
        config::server_name()
    );
    // Build and append the redaction under the room state lock; the lock is
    // released as soon as this block ends.
    let redaction_event = {
        let state_lock = crate::room::lock_state(&event.room_id).await;
        timeline::build_and_append_pdu(
            PduBuilder {
                redacts: Some(event.event_id.clone()),
                ..PduBuilder::timeline(&RoomRedactionEventContent {
                    redacts: Some(event.event_id.clone()),
                    reason: Some(reason),
                })
            },
            &event.sender,
            &event.room_id,
            &crate::room::get_version(&event.room_id)?,
            &state_lock,
        )
        .await?
    };
    ctx.write_str(&format!(
        "Successfully redacted event. Redaction event ID: {}",
        redaction_event.event_id
    ))
    .await
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/media/cmd.rs | crates/server/src/admin/media/cmd.rs | use std::time::Duration;
use crate::admin::{Context, utils::parse_local_user_id};
use crate::core::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
use crate::media::Dimension;
use crate::room::timeline;
use crate::{AppError, AppResult, IsRemoteOrLocal, config, data, utils::time::parse_timepoint_ago};
pub(super) async fn delete_media(
ctx: &Context<'_>,
mxc: Option<OwnedMxcUri>,
event_id: Option<OwnedEventId>,
) -> AppResult<()> {
if event_id.is_some() && mxc.is_some() {
return Err(AppError::public(
"Please specify either an MXC or an event ID, not both.",
));
}
if let Some(mxc) = mxc {
trace!("Got MXC URL: {mxc}");
data::media::delete_media(mxc.server_name()?, mxc.media_id()?)?;
return Err(AppError::public(
"Deleted the MXC from our database and on our filesystem.",
));
}
if let Some(event_id) = event_id {
trace!("Got event ID to delete media from: {event_id}");
let mut mxc_urls = Vec::with_capacity(4);
// parsing the PDU for any MXC URLs begins here
match timeline::get_pdu_json(&event_id) {
Ok(Some(event_json)) => {
if let Some(content_key) = event_json.get("content") {
debug!("Event ID has \"content\".");
let content_obj = content_key.as_object();
if let Some(content) = content_obj {
// 1. attempts to parse the "url" key
debug!("Attempting to go into \"url\" key for main media file");
if let Some(url) = content.get("url") {
debug!("Got a URL in the event ID {event_id}: {url}");
if url.to_string().starts_with("\"mxc://") {
debug!("Pushing URL {url} to list of MXCs to delete");
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
info!(
"Found a URL in the event ID {event_id} but did not start \
with mxc://, ignoring"
);
}
}
// 2. attempts to parse the "info" key
debug!("Attempting to go into \"info\" key for thumbnails");
if let Some(info_key) = content.get("info") {
debug!("Event ID has \"info\".");
let info_obj = info_key.as_object();
if let Some(info) = info_obj {
if let Some(thumbnail_url) = info.get("thumbnail_url") {
debug!("Found a thumbnail_url in info key: {thumbnail_url}");
if thumbnail_url.to_string().starts_with("\"mxc://") {
debug!(
"Pushing thumbnail URL {thumbnail_url} to list of \
MXCs to delete"
);
let final_thumbnail_url =
thumbnail_url.to_string().replace('"', "");
mxc_urls.push(final_thumbnail_url);
} else {
info!(
"Found a thumbnail URL in the event ID {event_id} \
but did not start with mxc://, ignoring"
);
}
} else {
info!(
"No \"thumbnail_url\" key in \"info\" key, assuming no \
thumbnails."
);
}
}
}
// 3. attempts to parse the "file" key
debug!("Attempting to go into \"file\" key");
if let Some(file_key) = content.get("file") {
debug!("Event ID has \"file\".");
let file_obj = file_key.as_object();
if let Some(file) = file_obj {
if let Some(url) = file.get("url") {
debug!("Found url in file key: {url}");
if url.to_string().starts_with("\"mxc://") {
debug!("Pushing URL {url} to list of MXCs to delete");
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
warn!(
"Found a URL in the event ID {event_id} but did not \
start with mxc://, ignoring"
);
}
} else {
error!("No \"url\" key in \"file\" key.");
}
}
}
} else {
return Err(AppError::public(
"Event ID does not have a \"content\" key or failed parsing the \
event ID JSON.",
));
}
} else {
return Err(AppError::public(
"Event ID does not have a \"content\" key, this is not a message or an \
event type that contains media.",
));
}
}
_ => {
return Err(AppError::public(
"Event ID does not exist or is not known to us.",
));
}
}
if mxc_urls.is_empty() {
return Err(AppError::public("Parsed event ID but found no MXC URLs."));
}
let mut mxc_deletion_count: usize = 0;
for mxc_url in mxc_urls {
let Mxc {
server_name,
media_id,
} = mxc_url.as_str().try_into()?;
match data::media::delete_media(server_name, media_id) {
Ok(()) => {
info!("Successfully deleted {mxc_url} from filesystem and database");
mxc_deletion_count = mxc_deletion_count.saturating_add(1);
}
Err(e) => {
warn!("Failed to delete {mxc_url}, ignoring error and skipping: {e}");
continue;
}
}
}
return ctx
.write_str(&format!(
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \
from event ID {event_id}."
))
.await;
}
Err(AppError::public(
"Please specify either an MXC using --mxc or an event ID using --event-id of the \
message containing an image. See --help for details.",
))
}
/// Bulk-deletes MXC URIs listed in a Markdown code block in the command body.
pub(super) async fn delete_media_list(ctx: &Context<'_>) -> AppResult<()> {
    // The list must arrive fenced between ``` lines.
    if ctx.body.len() < 2
        || !ctx.body[0].trim().starts_with("```")
        || ctx.body.last().unwrap_or(&"").trim() != "```"
    {
        return Err(AppError::public(
            "Expected code block in command body. Add --help for details.",
        ));
    }
    let mut failed_parsed_mxcs: usize = 0;
    // Everything between the opening and closing fence is a candidate MXC;
    // unparsable lines are logged and counted, not fatal.
    let mxc_list: Vec<Mxc<'_>> = ctx.body[1..ctx.body.len() - 1]
        .iter()
        .filter_map(|&mxc_s| {
            mxc_s
                .try_into()
                .inspect_err(|e| {
                    warn!("Failed to parse user-provided MXC URI: {e}");
                    failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
                })
                .ok()
        })
        .collect();
    let mut mxc_deletion_count: usize = 0;
    for mxc in &mxc_list {
        trace!(%failed_parsed_mxcs, %mxc_deletion_count, "Deleting MXC {mxc} in bulk");
        match data::media::delete_media(mxc.server_name, mxc.media_id) {
            Ok(()) => {
                info!("Successfully deleted {mxc} from filesystem and database");
                mxc_deletion_count = mxc_deletion_count.saturating_add(1);
            }
            Err(e) => {
                warn!("Failed to delete {mxc}, ignoring error and skipping: {e}");
            }
        }
    }
    ctx.write_str(&format!(
        "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \
         and the filesystem. {failed_parsed_mxcs} MXCs failed to be parsed from the database.",
    ))
    .await
}
/// Deletes cached remote media older/newer than the given time point.
///
/// `before` and `after` are mutually exclusive; local media is only touched
/// when `yes_i_want_to_delete_local_media` is set.
pub(super) async fn delete_past_remote_media(
    ctx: &Context<'_>,
    duration: String,
    before: bool,
    after: bool,
    yes_i_want_to_delete_local_media: bool,
) -> AppResult<()> {
    // Mutually exclusive flags. (A former `assert!(!(before && after))`
    // directly after this check was unreachable dead code and was removed.)
    if before && after {
        return Err(AppError::public(
            "Please only pick one argument, --before or --after.",
        ));
    }
    // e.g. "2 weeks" -> a concrete point in the past.
    let duration = parse_timepoint_ago(&duration)?;
    let deleted_count = crate::media::delete_past_remote_media(
        duration,
        before,
        after,
        yes_i_want_to_delete_local_media,
    )
    .await?;
    ctx.write_str(&format!("Deleted {deleted_count} total files.",))
        .await
}
/// Deletes every media file uploaded by the given local user.
pub(super) async fn delete_all_media_from_user(
    ctx: &Context<'_>,
    username: String,
) -> AppResult<()> {
    let user_id = parse_local_user_id(&username)?;
    let deleted_count = crate::user::delete_all_media(&user_id).await?;
    let reply = format!("Deleted {deleted_count} total files.");
    ctx.write_str(&reply).await
}
/// Deletes every known media file originating from `server_name`.
///
/// Refuses to touch local media unless explicitly confirmed via
/// `yes_i_want_to_delete_local_media`. Per-file failures are logged and
/// skipped rather than aborting the whole sweep.
pub(super) async fn delete_all_media_from_server(
    ctx: &Context<'_>,
    server_name: OwnedServerName,
    yes_i_want_to_delete_local_media: bool,
) -> AppResult<()> {
    if server_name == config::server_name() && !yes_i_want_to_delete_local_media {
        return Err(AppError::public(
            "This command only works for remote media by default.",
        ));
    }
    let Ok(all_mxcs) = crate::media::get_all_mxcs()
        .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}"))
    else {
        return Err(AppError::public("Failed to get MXC URIs from our database"));
    };
    let mut deleted_count: usize = 0;
    for mxc in all_mxcs {
        // Skip database entries whose server name cannot be parsed.
        let Ok(mxc_server_name) = mxc.server_name().inspect_err(|e| {
            warn!(
                "Failed to parse MXC {mxc} server name from database, ignoring error and \
                 skipping: {e}"
            );
        }) else {
            continue;
        };
        // Only delete media from the requested server, and never local media
        // without explicit confirmation.
        if mxc_server_name != server_name
            || (mxc_server_name.is_local() && !yes_i_want_to_delete_local_media)
        {
            trace!("skipping MXC URI {mxc}");
            continue;
        }
        let mxc: Mxc<'_> = mxc.as_str().try_into()?;
        match crate::media::delete_media(mxc.server_name, mxc.media_id).await {
            Ok(()) => {
                deleted_count = deleted_count.saturating_add(1);
            }
            Err(e) => {
                warn!("Failed to delete {mxc}, ignoring error and skipping: {e}");
                continue;
            }
        }
    }
    ctx.write_str(&format!("Deleted {deleted_count} total files.",))
        .await
}
pub(super) async fn get_file_info(ctx: &Context<'_>, mxc: OwnedMxcUri) -> AppResult<()> {
let Mxc {
server_name,
media_id,
} = mxc.as_str().try_into()?;
let metadata = data::media::get_metadata(server_name, media_id)?;
ctx.write_str(&format!("```\n{metadata:#?}\n```")).await
}
// pub(super) async fn fetch_remote_file(
// ctx: &Context<'_>,
// mxc: OwnedMxcUri,
// server: Option<OwnedServerName>,
// timeout: u32,
// ) -> AppResult<()> {
// let mxc: Mxc<'_> = mxc.as_str().try_into()?;
// let timeout = Duration::from_millis(timeout.into());
// let mut result = crate::media::fetch_remote_content(&mxc, &mxc.server_name, &mxc.media_id)
// .await?;
// // Grab the length of the content before clearing it to not flood the output
// let len = result.content.as_ref().expect("content").len();
// result.content.as_mut().expect("content").clear();
// ctx.write_str(&format!(
// "```\n{result:#?}\nreceived {len} bytes for file content.\n```"
// ))
// .await
// }
/// Fetches a remote thumbnail and prints the response metadata, replacing
/// the raw file bytes with their length to keep the reply readable.
pub(super) async fn fetch_remote_thumbnail(
    ctx: &Context<'_>,
    mxc: OwnedMxcUri,
    server: Option<OwnedServerName>,
    timeout: u32,
    width: u32,
    height: u32,
) -> AppResult<()> {
    let mxc: Mxc<'_> = mxc.as_str().try_into()?;
    let timeout = Duration::from_millis(timeout.into());
    let dim = Dimension::new(width, height, None);
    let mut result =
        crate::media::fetch_remote_thumbnail(&mxc, None, server.as_deref(), timeout, &dim).await?;
    // Record the payload size, then drop the bytes before debug-printing.
    let content = result.content.as_mut().expect("content");
    let len = content.len();
    content.clear();
    ctx.write_str(&format!(
        "```\n{result:#?}\nreceived {len} bytes for file content.\n```"
    ))
    .await
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/server/cmd.rs | crates/server/src/admin/server/cmd.rs | use std::{fmt::Write, path::PathBuf};
use crate::admin::Context;
use crate::{AppError, AppResult, config, info};
// pub(super) async fn uptime(ctx: &Context<'_>) -> AppResult<()> {
// // TODO: admin
// // let elapsed = self.services.server.started.elapsed().expect("standard duration");
// // let result = time::pretty(elapsed);
// // ctx.write_str(&format!("{result}.")).await
// Ok(())
// }
/// Prints the currently active server configuration.
pub(super) async fn show_config(ctx: &Context<'_>) -> AppResult<()> {
    let rendered = config::get().to_string();
    ctx.write_str(&rendered).await
}
/// Reloads the server configuration, optionally from an explicit path.
///
/// NOTE(review): the reload itself is still a TODO stub — `path` is unused
/// and the command replies success without doing anything; confirm intent.
pub(super) async fn reload_config(ctx: &Context<'_>, path: Option<PathBuf>) -> AppResult<()> {
    // TODO: admin
    // let path = path.as_deref();
    // config::reload(path)?;
    ctx.write_str("Successfully reconfigured.").await
}
/// Lists build features: `--enabled` only, `--available` only, or (default /
/// both flags) a combined checklist marking which available features are on.
pub(super) async fn list_features(
    ctx: &Context<'_>,
    available: bool,
    enabled: bool,
    comma: bool,
) -> AppResult<()> {
    let delim = if comma { "," } else { " " };
    match (enabled, available) {
        // Only enabled features, as a flat delimited list.
        (true, false) => {
            let features = info::rustc::features().join(delim);
            ctx.write_str(&format!("`\n{features}\n`")).await
        }
        // Only available features, as a flat delimited list.
        (false, true) => {
            let features = info::cargo::features().join(delim);
            ctx.write_str(&format!("`\n{features}\n`")).await
        }
        // Neither or both: render the full checklist.
        _ => {
            let enabled = info::rustc::features();
            let available = info::cargo::features();
            let mut features = String::new();
            for feature in available {
                let active = enabled.contains(&feature.as_str());
                let (emoji, remark) = if active { ("✅", "[enabled]") } else { ("❌", "") };
                writeln!(features, "{emoji} {feature} {remark}")?;
            }
            ctx.write_str(&features).await
        }
    }
}
// pub(super) async fn clear_caches(ctx: &Context<'_>) -> AppResult<()> {
// clear_cache(ctx).await;
// ctx.write_str("Done.").await
// }
/// Sends a free-form notice to the #admins room.
pub(super) async fn admin_notice(ctx: &Context<'_>, message: Vec<String>) -> AppResult<()> {
    let text = message.join(" ");
    crate::admin::send_text(&text).await?;
    ctx.write_str("Notice was sent to #admins").await
}
/// Hot-reloads server modules. Currently a TODO stub that only replies.
pub(super) async fn reload_mods(ctx: &Context<'_>) -> AppResult<()> {
    // TODO: reload mods
    ctx.write_str("(TODO) Reloading server...").await
}
/// Restarts the server; refuses when the on-disk executable has been
/// replaced/deleted since launch, unless `--force` is given.
///
/// NOTE(review): the restart itself is still a TODO stub.
pub(super) async fn restart(ctx: &Context<'_>, force: bool) -> AppResult<()> {
    use crate::utils::sys::current_exe_deleted;
    if !force && current_exe_deleted() {
        return Err(AppError::public(
            "The server cannot be restarted because the executable changed. If this is expected \
             use --force to override.",
        ));
    }
    // TODO: restart server
    ctx.write_str("(TODO) Restarting server...").await
}
/// Shuts down the server. Currently a TODO stub that only warns and replies.
pub(super) async fn shutdown(ctx: &Context<'_>) -> AppResult<()> {
    warn!("shutdown command");
    // TODO: shutdown server
    ctx.write_str("(TODO) Shutting down server...").await
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/admin/debug/cmd.rs | crates/server/src/admin/debug/cmd.rs | mod cmd;
use clap::Subcommand;
use crate::core::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName};
use crate::AppResult;
// NOTE: the `///` doc comments below double as the clap-generated help text
// for each admin-room subcommand — editing them changes user-visible output.
#[derive(Debug, Subcommand)]
pub(super) enum DebugCommand {
    /// - Echo input of admin command
    Echo {
        message: Vec<String>,
    },
    /// - Get the auth_chain of a PDU
    GetAuthChain {
        /// An event ID (the $ character followed by the base64 reference hash)
        event_id: OwnedEventId,
    },
    /// - Parse and print a PDU from a JSON
    ///
    /// The PDU event is only checked for validity and is not added to the
    /// database.
    ///
    /// This command needs a JSON blob provided in a Markdown code block below
    /// the command.
    ParsePdu,
    /// - Retrieve and print a PDU by EventID from the palpo database
    GetPdu {
        /// An event ID (a $ followed by the base64 reference hash)
        event_id: OwnedEventId,
    },
    /// - Retrieve and print a PDU by PduId from the palpo database
    // NOTE(review): `ShortRoomId`/`ShortEventId` are not among this file's
    // visible imports — confirm they resolve via the imports above.
    GetShortPdu {
        /// Shortroomid integer
        shortroomid: ShortRoomId,
        /// Shorteventid integer
        shorteventid: ShortEventId,
    },
    // /// - Attempts to retrieve a PDU from a remote server. Inserts it into our
    // ///   database/timeline if found and we do not have this PDU already
    // ///   (following normal event auth rules, handles it as an incoming PDU).
    // GetRemotePdu {
    //     /// An event ID (a $ followed by the base64 reference hash)
    //     event_id: OwnedEventId,
    //     /// Argument for us to attempt to fetch the event from the
    //     /// specified remote server.
    //     server: OwnedServerName,
    // },
    // /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited
    // ///   list of PDUs and a single server to fetch from
    // GetRemotePduList {
    //     /// Argument for us to attempt to fetch all the events from the
    //     /// specified remote server.
    //     server: OwnedServerName,
    //     /// If set, ignores errors, else stops at the first error/failure.
    //     #[arg(short, long)]
    //     force: bool,
    // },
    /// - Gets all the room state events for the specified room.
    ///
    /// This is functionally equivalent to `GET
    /// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does
    /// *not* check if the sender user is allowed to see state events. This is
    /// done because it's implied that server admins here have database access
    /// and can see/get room info themselves anyways if they were malicious
    /// admins.
    ///
    /// Of course the check is still done on the actual client API.
    GetRoomState {
        /// Room ID
        room_id: OwnedRoomOrAliasId,
    },
    /// - Get and display signing keys from local cache or remote server.
    GetSigningKeys {
        server_name: Option<OwnedServerName>,
        #[arg(long)]
        notary: Option<OwnedServerName>,
        #[arg(short, long)]
        query: bool,
    },
    /// - Get and display signing keys from local cache or remote server.
    GetVerifyKeys {
        server_name: Option<OwnedServerName>,
    },
    /// - Sends a federation request to the remote server's
    ///   `/_matrix/federation/v1/version` endpoint and measures the latency it
    ///   took for the server to respond
    Ping {
        server: OwnedServerName,
    },
    /// - Forces device lists for all local and remote users to be updated (as
    ///   having new keys available)
    ForceDeviceListUpdates,
    /// - Change tracing log level/filter on the fly
    ///
    /// This accepts the same format as the `log` config option.
    ChangeLogLevel {
        /// Log level/filter
        filter: Option<String>,
        /// Resets the log level/filter to the one in your config
        #[arg(short, long)]
        reset: bool,
    },
    /// - Verify json signatures
    ///
    /// This command needs a JSON blob provided in a Markdown code block below
    /// the command.
    SignJson,
    /// - Verify json signatures
    ///
    /// This command needs a JSON blob provided in a Markdown code block below
    /// the command.
    VerifyJson,
    /// - Verify PDU
    ///
    /// This re-verifies a PDU existing in the database found by ID.
    VerifyPdu {
        event_id: OwnedEventId,
    },
    /// - Prints the very first PDU in the specified room (typically
    ///   m.room.create)
    FirstPduInRoom {
        /// The room ID
        room_id: OwnedRoomId,
    },
    /// - Prints the latest ("last") PDU in the specified room (typically a
    ///   message)
    LatestPduInRoom {
        /// The room ID
        room_id: OwnedRoomId,
    },
    /// - Forcefully replaces the room state of our local copy of the specified
    ///   room, with the copy (auth chain and room state events) the specified
    ///   remote server says.
    ///
    /// A common desire for room deletion is to simply "reset" our copy of the
    /// room. While this admin command is not a replacement for that, if you
    /// know you have split/broken room state and you know another server in the
    /// room that has the best/working room state, this command can let you use
    /// their room state. Such example is your server saying users are in a
    /// room, but other servers are saying they're not in the room in question.
    ///
    /// This command will get the latest PDU in the room we know about, and
    /// request the room state at that point in time via
    /// `/_matrix/federation/v1/state/{roomId}`.
    ForceSetRoomStateFromServer {
        /// The impacted room ID
        room_id: OwnedRoomId,
        /// The server we will use to query the room state for
        server_name: OwnedServerName,
    },
    /// - Runs a server name through palpo's true destination resolution
    ///   process
    ///
    /// Useful for debugging well-known issues
    ResolveTrueDestination {
        server_name: OwnedServerName,
        #[arg(short, long)]
        no_cache: bool,
    },
    /// - Print extended memory usage
    ///
    /// Optional argument is a character mask (a sequence of characters in any
    /// order) which enable additional extended statistics. Known characters are
    /// "abdeglmx". For convenience, a '*' will enable everything.
    MemoryStats {
        opts: Option<String>,
    },
    /// - Print general tokio runtime metric totals.
    RuntimeMetrics,
    /// - Print detailed tokio runtime metrics accumulated since last command
    ///   invocation.
    RuntimeInterval,
    /// - Print the current time
    Time,
    /// - List dependencies
    ListDependencies {
        #[arg(short, long)]
        names: bool,
    },
    /// - Get database statistics
    DatabaseStats {
        property: Option<String>,
        #[arg(short, long, alias("column"))]
        map: Option<String>,
    },
    /// - Trim memory usage
    TrimMemory,
    /// - List database files
    DatabaseFiles {
        map: Option<String>,
        #[arg(long)]
        level: Option<i32>,
    },
    /// - Create a JWT token for login
    CreateJwt {
        /// Localpart of the user's MXID
        user: String,
        /// Set expiration time in seconds from now.
        #[arg(long)]
        exp_from_now: Option<u64>,
        /// Set not-before time in seconds from now.
        #[arg(long)]
        nbf_from_now: Option<u64>,
        /// Claim an issuer.
        #[arg(long)]
        issuer: Option<String>,
        /// Claim an audience.
        #[arg(long)]
        audience: Option<String>,
    },
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/stream.rs | crates/server/src/utils/stream.rs | use futures_util::{
StreamExt, stream,
stream::{Stream, TryStream},
};
/// Adapters for turning any `IntoIterator` into an async [`Stream`].
pub trait IterStream<I: IntoIterator + Send> {
    /// Convert an Iterator into a Stream
    fn stream(self) -> impl Stream<Item = <I as IntoIterator>::Item> + Send;

    /// Convert an Iterator into a TryStream
    fn try_stream(
        self,
    ) -> impl TryStream<Ok = <I as IntoIterator>::Item, Error = crate::AppError> + Send;
}
impl<I> IterStream<I> for I
where
    I: IntoIterator + Send,
    <I as IntoIterator>::IntoIter: Send,
{
    #[inline]
    fn stream(self) -> impl Stream<Item = <I as IntoIterator>::Item> + Send {
        // Each item is yielded immediately (always Ready).
        stream::iter(self)
    }

    #[inline]
    fn try_stream(
        self,
    ) -> impl TryStream<Ok = <I as IntoIterator>::Item, Error = crate::AppError> + Send {
        // Wrap every item in `Ok`, producing an infallible TryStream.
        self.stream().map(Ok)
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/mutex_map.rs | crates/server/src/utils/mutex_map.rs | use std::collections::HashMap;
use std::{
fmt::Debug,
hash::Hash,
sync::{Arc, TryLockError::WouldBlock},
};
use tokio::sync::OwnedMutexGuard as Omg;
use crate::{AppError, AppResult};
/// Map of Mutexes
pub struct MutexMap<Key, Val> {
    // The outer std mutex protects the HashMap itself; each value is an async
    // tokio mutex handed to callers via a cloned `Arc`.
    map: Arc<std::sync::Mutex<HashMap<Key, Arc<tokio::sync::Mutex<Val>>>>>,
}
/// RAII guard for one locked entry; on drop, it may also garbage-collect the
/// map entry (see the `Drop` impl below).
pub struct MutexMapGuard<Key, Val> {
    // Shared handle back to the owning map, needed for cleanup on drop.
    map: Arc<std::sync::Mutex<HashMap<Key, Arc<tokio::sync::Mutex<Val>>>>>,
    // Owned lock guard keeping the per-key mutex (and its Arc) alive.
    val: Omg<Val>,
}
impl<Key, Val> MutexMap<Key, Val>
where
    Key: Clone + Eq + Hash + Send,
    Val: Default + Send,
{
    /// Creates an empty map.
    #[must_use]
    pub fn new() -> Self {
        Self {
            map: Default::default(),
        }
    }

    /// Asynchronously locks the entry for `k`, inserting a default value on
    /// first use. Panics if `k` cannot be converted into `Key`.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn lock<'a, K>(&'a self, k: &'a K) -> MutexMapGuard<Key, Val>
    where
        K: Debug + Send + ?Sized + Sync,
        Key: TryFrom<&'a K>,
        <Key as TryFrom<&'a K>>::Error: Debug,
    {
        // Clone the entry's Arc while briefly holding the outer map mutex,
        // then await the per-key lock without blocking the map.
        let val = self
            .map
            .lock()
            .expect("locked")
            .entry(k.try_into().expect("failed to construct key"))
            .or_default()
            .clone();
        MutexMapGuard::<Key, Val> {
            map: Arc::clone(&self.map),
            val: val.lock_owned().await,
        }
    }

    /// Like [`Self::lock`], but returns an error instead of waiting when the
    /// per-key mutex is already held. Still blocks on the outer map mutex.
    #[tracing::instrument(level = "trace", skip(self))]
    pub fn try_lock<'a, K>(&self, k: &'a K) -> AppResult<MutexMapGuard<Key, Val>>
    where
        K: Debug + Send + ?Sized + Sync,
        Key: TryFrom<&'a K>,
        <Key as TryFrom<&'a K>>::Error: Debug,
    {
        let val = self
            .map
            .lock()
            .expect("locked")
            .entry(k.try_into().expect("failed to construct key"))
            .or_default()
            .clone();
        Ok(MutexMapGuard::<Key, Val> {
            map: Arc::clone(&self.map),
            val: val
                .try_lock_owned()
                .map_err(|_| AppError::internal("would yield"))?,
        })
    }

    /// Fully non-blocking variant of [`Self::try_lock`]: also errs ("would
    /// block") if the outer map mutex is contended.
    #[tracing::instrument(level = "trace", skip(self))]
    pub fn try_try_lock<'a, K>(&self, k: &'a K) -> AppResult<MutexMapGuard<Key, Val>>
    where
        K: Debug + Send + ?Sized + Sync,
        Key: TryFrom<&'a K>,
        <Key as TryFrom<&'a K>>::Error: Debug,
    {
        let val = self
            .map
            .try_lock()
            .map_err(|e| match e {
                WouldBlock => AppError::internal("would block"),
                _ => panic!("{e:?}"),
            })?
            .entry(k.try_into().expect("failed to construct key"))
            .or_default()
            .clone();
        Ok(MutexMapGuard::<Key, Val> {
            map: Arc::clone(&self.map),
            val: val
                .try_lock_owned()
                .map_err(|_| AppError::internal("would yield"))?,
        })
    }

    /// Whether an entry (locked or not) currently exists for `k`.
    #[must_use]
    pub fn contains(&self, k: &Key) -> bool {
        self.map.lock().expect("locked").contains_key(k)
    }

    /// Whether the map holds no entries.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.map.lock().expect("locked").is_empty()
    }

    /// Number of entries currently in the map.
    #[must_use]
    pub fn len(&self) -> usize {
        self.map.lock().expect("locked").len()
    }
}
impl<Key, Val> Default for MutexMap<Key, Val>
where
    Key: Clone + Eq + Hash + Send,
    Val: Default + Send,
{
    /// Equivalent to [`MutexMap::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<Key, Val> Drop for MutexMapGuard<Key, Val> {
    #[tracing::instrument(name = "unlock", level = "trace", skip_all)]
    fn drop(&mut self) {
        // Garbage-collect the map entry when this guard appears to hold the
        // last outstanding reference: one Arc lives in the map and one in
        // this guard, hence the `<= 2` / `> 2` thresholds. The retain
        // re-checks the count under the map lock to avoid racing a
        // concurrent `lock()` that cloned the Arc in the meantime.
        if Arc::strong_count(Omg::mutex(&self.val)) <= 2 {
            self.map.lock().expect("locked").retain(|_, val| {
                !Arc::ptr_eq(val, Omg::mutex(&self.val)) || Arc::strong_count(val) > 2
            });
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/string.rs | crates/server/src/utils/string.rs | mod between;
mod split;
mod tests;
mod unquote;
mod unquoted;
pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted};
use crate::AppResult;
pub const EMPTY: &str = "";
/// Constant expression to bypass format! if the argument is a string literal
/// but not a format string. If the literal is a format string then String is
/// returned otherwise the input (i.e. &'static str) is returned. If multiple
/// arguments are provided the first is assumed to be a format string.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! format_maybe {
    // Single literal: only allocate when it actually contains `{`...`}`.
    ($s:literal $(,)?) => {
        if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() }
    };
    // Additional args imply a format string; always format.
    ($s:literal, $($args:tt)+) => {
        std::format!($s, $($args)+).into()
    };
}
/// Constant expression to decide if a literal is a format string. Note: could
/// use some improvement.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! is_format {
    // Heuristic evaluated at compile time via `const_str`: a literal
    // containing both `{` and `}` is treated as a format string.
    ($s:literal) => {
        ::const_str::contains!($s, "{") && ::const_str::contains!($s, "}")
    };
    // Non-literal input is never considered a format string.
    ($($s:tt)+) => {
        false
    };
}
/// Runs `func` against a fresh in-memory writer and returns what it wrote.
#[inline]
pub fn collect_stream<F>(func: F) -> AppResult<String>
where
    F: FnOnce(&mut dyn std::fmt::Write) -> AppResult<()>,
{
    let mut buf = String::new();
    func(&mut buf)?;
    Ok(buf)
}
/// Converts a CamelCase string to snake_case, returning a new `String`.
#[inline]
#[must_use]
pub fn camel_to_snake_string(s: &str) -> String {
    // One extra byte per uppercase letter covers the inserted underscores.
    let uppercase = s.chars().filter(|c| c.is_ascii_uppercase()).count();
    let mut ret = String::with_capacity(s.len().saturating_add(uppercase));
    camel_to_snake_case(&mut ret, s.as_bytes()).expect("string-to-string stream error");
    ret
}
/// Streams bytes from `input` into `output`, converting CamelCase to
/// snake_case.
#[inline]
#[allow(clippy::unbuffered_bytes)] // these are allocated string utilities, not file I/O utils
pub fn camel_to_snake_case<I, O>(output: &mut O, input: I) -> AppResult<()>
where
    I: std::io::Read,
    O: std::fmt::Write,
{
    // `state` records whether the *previous* byte was NOT an ASCII uppercase
    // letter. An underscore is inserted only on a non-uppercase -> uppercase
    // edge, so a leading capital produces no leading underscore and runs of
    // capitals get a single separator. Read errors silently end the stream
    // (take_while below).
    let mut state = false;
    input
        .bytes()
        .take_while(Result::is_ok)
        .map(Result::unwrap)
        .map(char::from)
        .try_for_each(|ch| {
            let m = ch.is_ascii_uppercase();
            let s = std::mem::replace(&mut state, !m);
            if m && s {
                output.write_char('_')?;
            }
            output.write_char(ch.to_ascii_lowercase())?;
            Ok(())
        })
}
/// Find the common prefix from a collection of strings and return a slice
///
/// ```ignore
/// let input = ["conduwuit", "conduit", "construct"];
/// assert_eq!(common_prefix(&input), "con");
/// ```
#[must_use]
#[allow(clippy::string_slice)]
pub fn common_prefix<'a>(choice: &'a [&str]) -> &'a str {
    choice.first().map_or("", move |best| {
        choice.iter().skip(1).fold(*best, |best, other| {
            // Track the *byte* length of the matching prefix. The previous
            // implementation sliced by the number of matching chars
            // (`.count()`), which truncates multi-byte prefixes and can
            // panic on a non-char-boundary, e.g. for ["aé", "aé"].
            let mut end = 0;
            for ((i, a), b) in best.char_indices().zip(other.chars()) {
                if a != b {
                    break;
                }
                end = i + a.len_utf8();
            }
            // `end` always lands on a char boundary of `best`, so this
            // slice cannot panic.
            &best[..end]
        })
    })
}
/// Parses the bytes into an owned string, failing on invalid UTF-8.
pub fn string_from_bytes(bytes: &[u8]) -> AppResult<String> {
    str_from_bytes(bytes).map(|s| s.to_owned())
}
/// Borrows the bytes as a `&str`, failing on invalid UTF-8.
#[inline]
pub fn str_from_bytes(bytes: &[u8]) -> AppResult<&str> {
    std::str::from_utf8(bytes).map_err(Into::into)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/content_disposition.rs | crates/server/src/utils/content_disposition.rs | use std::borrow::Cow;
use crate::core::http_headers::{ContentDisposition, ContentDispositionType};
/// as defined by MSC2702
// The sorted order is load-bearing: `content_disposition_type` looks entries
// up with `binary_search` (and debug-asserts `is_sorted`).
const ALLOWED_INLINE_CONTENT_TYPES: [&str; 26] = [
    // keep sorted
    "application/json",
    "application/ld+json",
    "audio/aac",
    "audio/flac",
    "audio/mp4",
    "audio/mpeg",
    "audio/ogg",
    "audio/wav",
    "audio/wave",
    "audio/webm",
    "audio/x-flac",
    "audio/x-pn-wav",
    "audio/x-wav",
    "image/apng",
    "image/avif",
    "image/gif",
    "image/jpeg",
    "image/png",
    "image/webp",
    "text/css",
    "text/csv",
    "text/plain",
    "video/mp4",
    "video/ogg",
    "video/quicktime",
    "video/webm",
];
/// Decides between `inline` and `attachment` Content-Disposition by checking
/// the Content-Type against the MSC2702 allow-list of safe inline types
/// (`ALLOWED_INLINE_CONTENT_TYPES`). Missing Content-Type means `attachment`.
#[must_use]
pub fn content_disposition_type(content_type: Option<&str>) -> ContentDispositionType {
    let Some(content_type) = content_type else {
        tracing::info!("No Content-Type was given, assuming attachment for Content-Disposition");
        return ContentDispositionType::Attachment;
    };
    debug_assert!(
        ALLOWED_INLINE_CONTENT_TYPES.is_sorted(),
        "ALLOWED_INLINE_CONTENT_TYPES is not sorted"
    );
    // Strip any parameters (e.g. "; charset=utf-8") and normalise case
    // before consulting the sorted allow-list.
    let essence: Cow<'_, str> = content_type
        .split(';')
        .next()
        .unwrap_or(content_type)
        .to_ascii_lowercase()
        .into();
    match ALLOWED_INLINE_CONTENT_TYPES.binary_search(&essence.as_ref()) {
        Ok(_) => ContentDispositionType::Inline,
        Err(_) => ContentDispositionType::Attachment,
    }
}
/// Sanitises a file name for use in the Content-Disposition header via the
/// `sanitize_filename` crate (no length truncation).
#[tracing::instrument(level = "debug")]
pub fn sanitise_filename(filename: &str) -> String {
    let options = sanitize_filename::Options {
        truncate: false,
        ..Default::default()
    };
    sanitize_filename::sanitize_with_options(filename, options)
}
/// Builds the final `Content-Disposition` header value.
///
/// The disposition type falls back to [`content_disposition_type`] (the
/// MSC2702 allow-list) when not explicitly provided; the filename, if any,
/// is sanitised before being attached.
///
/// with filename: `Content-Disposition: attachment/inline; filename=name.ext`
///
/// without: `Content-Disposition: attachment/inline`
pub fn make_content_disposition(
    disposition_type: Option<ContentDispositionType>,
    content_type: Option<&str>,
    filename: Option<&str>,
) -> ContentDisposition {
    let disposition_type = match disposition_type {
        Some(explicit) => explicit,
        None => content_disposition_type(content_type),
    };
    let sanitised = filename.map(sanitise_filename);
    ContentDisposition::new(disposition_type).with_filename(sanitised)
}
#[cfg(test)]
mod tests {
#[test]
fn string_sanitisation() {
const SAMPLE: &str = "🏳️⚧️this\\r\\n įs \r\\n ä \\r\nstrïng 🥴that\n\r \
../../../../../../../may be\r\n malicious🏳️⚧️";
const SANITISED: &str = "🏳️⚧️thisrn įs n ä rstrïng 🥴that ..............may be malicious🏳️⚧️";
let options = sanitize_filename::Options {
windows: true,
truncate: true,
replacement: "",
};
// cargo test -- --nocapture
println!("{SAMPLE}");
println!(
"{}",
sanitize_filename::sanitize_with_options(SAMPLE, options.clone())
);
println!("{SAMPLE:?}");
println!(
"{:?}",
sanitize_filename::sanitize_with_options(SAMPLE, options.clone())
);
assert_eq!(
SANITISED,
sanitize_filename::sanitize_with_options(SAMPLE, options.clone())
);
}
// #[test]
// fn empty_sanitisation() {
// use crate::EMPTY;
// let result = sanitize_filename::sanitize_with_options(
// EMPTY,
// sanitize_filename::Options {
// windows: true,
// truncate: true,
// replacement: "",
// },
// );
// assert_eq!(EMPTY, result);
// }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/fs.rs | crates/server/src/utils/fs.rs | use std::ffi::OsStr;
use std::fs::File;
use std::io::BufReader;
use std::path::{Path, PathBuf};
use serde::Serialize;
use serde::de::DeserializeOwned;
use crate::AppResult;
/// Returns `true` when `dir_path` looks safe to use as a relative directory
/// path: no dots (blocks `..` traversal), no colons (blocks drive prefixes),
/// no backslashes, and not absolute (no leading `/`).
pub fn is_safe_dir_path(dir_path: &str) -> bool {
    !dir_path.starts_with('/') && !dir_path.contains(['.', ':', '\\'])
}
/// A directory path that is recursively deleted when this value is dropped.
pub struct TempPath(String);
impl TempPath {
    /// Wraps `path` so the directory is removed on drop.
    pub fn new(path: impl Into<String>) -> Self {
        TempPath(path.into())
    }
}
impl AsRef<str> for TempPath {
    fn as_ref(&self) -> &str {
        &self.0
    }
}
impl Drop for TempPath {
    fn drop(&mut self) {
        // Best-effort cleanup; errors (e.g. path already gone) are ignored.
        ::std::fs::remove_dir_all(&self.0).ok();
    }
}
fn file_name_sanitized(file_name: &str) -> ::std::path::PathBuf {
let no_null_filename = match file_name.find('\0') {
Some(index) => &file_name[0..index],
None => file_name,
}
.to_string();
// zip files can contain both / and \ as separators regardless of the OS
// and as we want to return a sanitized PathBuf that only supports the
// OS separator let's convert incompatible separators to compatible ones
let separator = ::std::path::MAIN_SEPARATOR;
let opposite_separator = match separator {
'/' => '\\',
_ => '/',
};
let filename =
no_null_filename.replace(&opposite_separator.to_string(), &separator.to_string());
::std::path::Path::new(&filename)
.components()
.filter(|component| matches!(*component, ::std::path::Component::Normal(..)))
.fold(::std::path::PathBuf::new(), |mut path, ref cur| {
path.push(cur.as_os_str());
path
})
}
/// Returns the lower-cased file extension of `path`, or an empty string when
/// the path has no (UTF-8) extension.
pub fn get_file_ext<P: AsRef<Path>>(path: P) -> String {
    match path.as_ref().extension().and_then(OsStr::to_str) {
        Some(ext) => ext.to_lowercase(),
        None => String::new(),
    }
}
/// Reads the file at `path` and deserializes its JSON contents into `T`.
pub fn read_json<T: DeserializeOwned, P: AsRef<Path>>(path: P) -> AppResult<T> {
    let reader = BufReader::new(File::open(path.as_ref())?);
    serde_json::from_reader::<_, T>(reader).map_err(Into::into)
}
/// Serializes `contents` as JSON (pretty-printed when `pretty` is set) and
/// writes it to `path`, creating missing parent directories first.
pub fn write_json<P: AsRef<Path>, C: Serialize>(
    path: P,
    contents: C,
    pretty: bool,
) -> AppResult<()> {
    std::fs::create_dir_all(get_parent_dir(path.as_ref()))?;
    let bytes = if pretty {
        serde_json::to_vec_pretty(&contents)?
    } else {
        serde_json::to_vec(&contents)?
    };
    std::fs::write(path, bytes)?;
    Ok(())
}
/// Writes raw `contents` to `path`, creating missing parent directories first.
pub fn write_text<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, contents: C) -> AppResult<()> {
    let parent = get_parent_dir(path.as_ref());
    std::fs::create_dir_all(parent)?;
    std::fs::write(path, contents)?;
    Ok(())
}
/// Returns `path` with its final component removed, i.e. the containing
/// directory; an empty path results when there is nothing left to pop.
pub fn get_parent_dir<T>(path: T) -> PathBuf
where
    T: AsRef<Path>,
{
    let mut dir = PathBuf::from(path.as_ref());
    dir.pop();
    dir
}
/// True when `ext` (lower-case, without the dot) is a recognised image extension.
pub fn is_image_ext(ext: &str) -> bool {
    matches!(ext, "gif" | "jpg" | "jpeg" | "webp" | "avif" | "png" | "svg")
}
/// True when `ext` is a recognised video extension.
pub fn is_video_ext(ext: &str) -> bool {
    matches!(ext, "mp4" | "mov" | "avi" | "wmv" | "webm")
}
/// True when `ext` is a recognised audio extension.
pub fn is_audio_ext(ext: &str) -> bool {
    matches!(ext, "mp3" | "flac" | "wav" | "aac" | "ogg" | "alac" | "wma" | "m4a")
}
/// True when `ext` is a recognised font extension.
pub fn is_font_ext(ext: &str) -> bool {
    matches!(ext, "ttf" | "otf" | "woff" | "woff2")
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/sequm_queue.rs | crates/server/src/utils/sequm_queue.rs | use std::collections::BTreeSet;
use std::fmt::{self, Debug, Display, Formatter};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use crate::core::Seqnum;
/// A shared, ordered set of in-flight sequence numbers. `push` registers a
/// seqnum (removed again when the returned guard drops) and `reach` yields a
/// future that resolves once every registered seqnum <= the target has been
/// removed.
#[derive(Debug, Default)]
pub struct SeqnumQueue {
    // Ordered so the smallest outstanding seqnum is cheap to inspect.
    queue: Arc<std::sync::Mutex<BTreeSet<Seqnum>>>,
}
/// Future created by [`SeqnumQueue::reach`]; completes when no seqnum <= `value`
/// remains in the queue.
pub struct SeqnumQueueFuture {
    queue: Arc<std::sync::Mutex<BTreeSet<Seqnum>>>,
    value: Seqnum,
}
impl Future for SeqnumQueueFuture {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let queue = self.queue.lock().expect("locked");
        if let Some(first) = queue.first() {
            // `first` is the smallest outstanding seqnum; once it passes our
            // target, everything at or below the target has been drained.
            if first > &self.value {
                Poll::Ready(())
            } else {
                // NOTE(review): self-wake on every Pending makes this a busy
                // poll (the executor re-polls immediately) rather than waiting
                // for a guard drop to wake it — confirm this is intentional.
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        } else {
            // Empty queue: nothing outstanding, target trivially reached.
            Poll::Ready(())
        }
    }
}
/// RAII guard returned by [`SeqnumQueue::push`]; removes its seqnum from the
/// queue when dropped.
pub struct SeqnumQueueGuard {
    queue: Arc<std::sync::Mutex<BTreeSet<Seqnum>>>,
    value: Seqnum,
}
impl SeqnumQueue {
    /// Creates an empty queue.
    pub fn new() -> Self {
        Self {
            queue: Default::default(),
        }
    }
    /// Registers `sn` as in-flight; the seqnum stays registered until the
    /// returned guard is dropped.
    pub fn push(&self, sn: Seqnum) -> SeqnumQueueGuard {
        let mut queue = self.queue.lock().expect("locked");
        queue.insert(sn);
        SeqnumQueueGuard {
            queue: Arc::clone(&self.queue),
            value: sn,
        }
    }
    /// Returns a future that completes once no registered seqnum <= `sn`
    /// remains (see [`SeqnumQueueFuture::poll`]).
    pub fn reach(&self, sn: Seqnum) -> SeqnumQueueFuture {
        SeqnumQueueFuture {
            queue: Arc::clone(&self.queue),
            value: sn,
        }
    }
    /// Whether `sn` is currently registered.
    pub fn contains(&self, sn: Seqnum) -> bool {
        self.queue.lock().expect("locked").contains(&sn)
    }
    /// Whether no seqnum is currently registered.
    pub fn is_empty(&self) -> bool {
        self.queue.lock().expect("locked").is_empty()
    }
    /// Number of currently registered seqnums.
    pub fn len(&self) -> usize {
        self.queue.lock().expect("locked").len()
    }
}
impl Drop for SeqnumQueueGuard {
    fn drop(&mut self) {
        // Deregister our seqnum so pending `reach` futures can complete.
        let mut queue = self.queue.lock().expect("locked");
        queue.remove(&self.value);
    }
}
impl Display for SeqnumQueueGuard {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "SeqnumQueueGuard({})", self.value)
    }
}
impl Debug for SeqnumQueueGuard {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "SeqnumQueueGuard({})", self.value)
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/time.rs | crates/server/src/utils/time.rs | use std::time::{Duration, SystemTime, UNIX_EPOCH};
use crate::{AppError, AppResult};
/// Current wall-clock time as milliseconds since the UNIX epoch.
#[inline]
#[must_use]
#[allow(clippy::as_conversions, clippy::cast_possible_truncation)]
pub fn now_millis() -> u64 {
    let since_epoch = UNIX_EPOCH
        .elapsed()
        .expect("positive duration after epoch");
    since_epoch.as_millis() as u64
}
/// Parses a human-readable duration string (e.g. "5m") and returns the point
/// in time that far in the past.
#[inline]
pub fn parse_timepoint_ago(ago: &str) -> AppResult<SystemTime> {
    let duration = parse_duration(ago)?;
    timepoint_ago(duration)
}
/// Returns `now - duration`, erroring when the subtraction would underflow.
#[inline]
pub fn timepoint_ago(duration: Duration) -> AppResult<SystemTime> {
    match SystemTime::now().checked_sub(duration) {
        Some(t) => Ok(t),
        None => Err(AppError::public(format!("Duration {duration:?} is too large"))),
    }
}
/// Returns `now + duration`, erroring when the addition would overflow.
#[inline]
pub fn timepoint_from_now(duration: Duration) -> AppResult<SystemTime> {
    match SystemTime::now().checked_add(duration) {
        Some(t) => Ok(t),
        None => Err(AppError::public(format!("Duration {duration:?} is too large"))),
    }
}
/// Parses a human-readable duration string (e.g. "1h30m") via `cyborgtime`,
/// converting parse failures into a public `AppError`.
#[inline]
pub fn parse_duration(duration: &str) -> AppResult<Duration> {
    cyborgtime::parse_duration(duration).map_err(|e| {
        AppError::public(format!(
            "'{duration:?}' is not a valid duration string: {e:?}"
        ))
    })
}
/// Formats a UNIX timestamp (seconds) as an RFC 2822 date string; an
/// out-of-range timestamp falls back to the epoch default.
#[must_use]
pub fn rfc2822_from_seconds(epoch: i64) -> String {
    use chrono::{DateTime, Utc};
    DateTime::<Utc>::from_timestamp(epoch, 0)
        .unwrap_or_default()
        .to_rfc2822()
}
/// Formats `ts` (interpreted as UTC) using a chrono `strftime`-style format
/// string.
#[must_use]
pub fn format(ts: SystemTime, str: &str) -> String {
    use chrono::{DateTime, Utc};
    let dt: DateTime<Utc> = ts.into();
    dt.format(str).to_string()
}
/// Renders a `Duration` as a short human-readable string such as
/// "1.50 hours", using the largest unit that yields a non-zero whole part.
#[must_use]
#[allow(
    clippy::as_conversions,
    clippy::cast_possible_truncation,
    clippy::cast_sign_loss
)]
pub fn pretty(d: Duration) -> String {
    use Unit::*;
    // `frac` is scaled to two digits: 0.5 renders as ".50".
    let fmt = |w, f, u| format!("{w}.{f} {u}");
    let gen64 = |w, f, u| fmt(w, (f * 100.0) as u32, u);
    let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u);
    match whole_and_frac(d) {
        (Days(whole), frac) => gen64(whole, frac, "days"),
        (Hours(whole), frac) => gen64(whole, frac, "hours"),
        (Mins(whole), frac) => gen64(whole, frac, "minutes"),
        (Secs(whole), frac) => gen64(whole, frac, "seconds"),
        (Millis(whole), frac) => gen128(whole, frac, "milliseconds"),
        (Micros(whole), frac) => gen128(whole, frac, "microseconds"),
        (Nanos(whole), frac) => gen128(whole, frac, "nanoseconds"),
    }
}
/// Return a pair of (whole part, frac part) from a duration. The whole
/// part is the largest Unit containing a non-zero value, the frac part is a
/// rational remainder left over.
#[must_use]
#[allow(clippy::as_conversions, clippy::cast_precision_loss)]
pub fn whole_and_frac(d: Duration) -> (Unit, f64) {
    use Unit::*;
    let whole = whole_unit(d);
    (
        whole,
        match whole {
            Days(_) => (d.as_secs() % 86_400) as f64 / 86_400.0,
            Hours(_) => (d.as_secs() % 3_600) as f64 / 3_600.0,
            Mins(_) => (d.as_secs() % 60) as f64 / 60.0,
            Secs(_) => f64::from(d.subsec_millis()) / 1000.0,
            // NOTE(review): for Millis/Micros these divisions can exceed 1.0
            // (e.g. 5.3ms gives subsec_micros()/1000 = 5.3, not 0.3) — verify
            // whether a `% 1000` was intended on the numerator.
            Millis(_) => f64::from(d.subsec_micros()) / 1000.0,
            Micros(_) => f64::from(d.subsec_nanos()) / 1000.0,
            Nanos(_) => 0.0,
        },
    )
}
/// Return the largest Unit which represents the duration. The value is
/// rounded-down, but never zero.
#[must_use]
pub fn whole_unit(d: Duration) -> Unit {
    let secs = d.as_secs();
    if secs >= 86_400 {
        return Unit::Days(secs / 86_400);
    }
    if secs >= 3_600 {
        return Unit::Hours(secs / 3_600);
    }
    if secs >= 60 {
        return Unit::Mins(secs / 60);
    }
    // Below one minute, bucket by sub-second precision.
    let micros = d.as_micros();
    if micros >= 1_000_000 {
        return Unit::Secs(secs);
    }
    if micros >= 1_000 {
        return Unit::Millis(d.subsec_millis().into());
    }
    if d.as_nanos() >= 1_000 {
        Unit::Micros(d.subsec_micros().into())
    } else {
        Unit::Nanos(d.subsec_nanos().into())
    }
}
/// Magnitude bucket for a `Duration`, carrying the whole number of that unit.
#[derive(Eq, PartialEq, Clone, Copy, Debug)]
pub enum Unit {
    Days(u64),
    Hours(u64),
    Mins(u64),
    Secs(u64),
    Millis(u128),
    Micros(u128),
    Nanos(u128),
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/defer.rs | crates/server/src/utils/defer.rs | #[macro_export]
macro_rules! defer {
($body:block) => {
struct _Defer_<F: FnMut()> {
closure: F,
}
impl<F: FnMut()> Drop for _Defer_<F> {
fn drop(&mut self) {
(self.closure)();
}
}
let _defer_ = _Defer_ { closure: || $body };
};
($body:expr_2021) => {
$crate::defer! {{ $body }}
};
}
#[macro_export]
macro_rules! scope_restore {
($val:ident, $ours:expr_2021) => {
let theirs = $crate::utils::exchange($val, $ours);
$crate::defer! {{ *$val = theirs; }};
};
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/hash.rs | crates/server/src/utils/hash.rs | use std::fmt::Write;
use std::path::Path;
use chksum::{SHA2_256, sha2_256};
use fast32::base32::CROCKFORD;
/// A raw digest (byte vector) with convenience encoders.
#[derive(Debug)]
pub struct Checksum(Vec<u8>);
impl Checksum {
    /// The raw digest bytes.
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_ref()
    }
    /// Encodes the digest using Crockford base32.
    pub fn to_base32_crockford(&self) -> String {
        CROCKFORD.encode(self.as_bytes())
    }
    /// Encodes the digest as upper-case hex, two digits per byte.
    pub fn to_hex_uppercase(&self) -> String {
        let mut result = String::with_capacity(self.0.len() * 2);
        for b in &self.0 {
            // Writing into a String cannot fail.
            write!(result, "{b:02X}").unwrap();
        }
        result
    }
}
pub fn base32_crockford(data: &[u8]) -> String {
CROCKFORD.encode(data)
}
/// Computes the SHA2-256 digest of the file at `file`.
///
/// # Errors
/// Returns an `std::io::Error` when the file cannot be read or hashed.
pub fn hash_file_sha2_256(file: impl AsRef<Path>) -> Result<Checksum, std::io::Error> {
    // Propagate hashing failures instead of panicking: the signature already
    // promises a Result, so `.unwrap()` here made the Err path unreachable.
    let digest = sha2_256::chksum(file.as_ref())
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
    Ok(Checksum(digest.as_bytes().to_vec()))
}
/// Computes the SHA2-256 digest of the in-memory byte slice `data`.
///
/// # Errors
/// Returns an `std::io::Error` when hashing fails (not expected for
/// in-memory data, but no longer a panic).
pub fn hash_data_sha2_256(data: &[u8]) -> Result<Checksum, std::io::Error> {
    let digest = sha2_256::chksum(data)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
    Ok(Checksum(digest.as_bytes().to_vec()))
}
//https://docs.rs/crate/checksums/0.6.0/source/src/hashing/mod.rs
/// Hex-encodes `bytes` as an upper-case string, two digits per byte.
pub fn hash_string(bytes: &[u8]) -> String {
    let mut out = String::with_capacity(bytes.len() * 2);
    for byte in bytes {
        // Writing into a String cannot fail.
        write!(out, "{byte:02X}").unwrap();
    }
    out
}
/// Calculate a new hash for the given password using Argon2id with the
/// library's default parameters and a freshly generated random salt.
pub fn hash_password(password: &str) -> Result<String, argon2::Error> {
    let hashing_conf = argon2::Config {
        variant: argon2::Variant::Argon2id,
        ..Default::default()
    };
    // Fresh 32-character random salt per password; it is embedded in the
    // encoded hash string that is returned.
    let salt = super::random_string(32);
    argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_conf)
}
/// Computes a single SHA2-256 digest over the concatenation of all `keys`,
/// in iteration order.
#[tracing::instrument(skip(keys))]
pub fn hash_keys<'a, T, I>(keys: I) -> Vec<u8>
where
    I: Iterator<Item = T> + 'a,
    T: AsRef<[u8]> + 'a,
{
    let mut hash = SHA2_256::new();
    for key in keys {
        hash.update(key.as_ref());
    }
    hash.digest().as_bytes().to_vec()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/sys.rs | crates/server/src/utils/sys.rs | pub mod compute;
use std::path::PathBuf;
pub use compute::available_parallelism;
use crate::AppResult;
// /// This is needed for opening lots of file descriptors, which tends to
// /// happen more often when using RocksDB and making lots of federation
// /// connections at startup. The soft limit is usually 1024, and the hard
// /// limit is usually 512000; I've personally seen it hit >2000.
// ///
// /// * <https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6>
// /// * <https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741>
// #[cfg(unix)]
// pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> {
// use nix::sys::resource::{Resource::RLIMIT_NOFILE as NOFILE, getrlimit, setrlimit};
// let (soft_limit, hard_limit) = getrlimit(NOFILE)?;
// if soft_limit < hard_limit {
// setrlimit(NOFILE, hard_limit, hard_limit)?;
// assert_eq!((hard_limit, hard_limit), getrlimit(NOFILE)?, "getrlimit != setrlimit");
// tracing::debug!(to = hard_limit, from = soft_limit, "Raised RLIMIT_NOFILE",);
// }
// Ok(())
// }
/// Return a possibly corrected std::env::current_exe() even if the path is
/// marked deleted.
///
/// # Safety
/// This function is declared unsafe because the original result was altered
/// for security purposes, and altering it back ignores those purposes and
/// should be understood by the user.
pub unsafe fn current_exe() -> AppResult<PathBuf> {
    let exe = std::env::current_exe()?;
    let Some(path_str) = exe.to_str() else {
        // Non-UTF-8 path: nothing to strip, return it untouched.
        return Ok(exe);
    };
    // The kernel appends " (deleted)" when the executable file was unlinked;
    // strip that marker to recover the original path.
    Ok(path_str
        .strip_suffix(" (deleted)")
        .map(PathBuf::from)
        .unwrap_or(exe))
}
/// Determine if the server's executable was removed or replaced. This is a
/// specific check; useful for successful restarts. May not be available or
/// accurate on all platforms; defaults to false.
#[must_use]
pub fn current_exe_deleted() -> bool {
    match std::env::current_exe() {
        Ok(exe) => exe
            .to_str()
            .is_some_and(|path| path.ends_with(" (deleted)")),
        Err(_) => false,
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/string/tests.rs | crates/server/src/utils/string/tests.rs | #![cfg(test)]
#[test]
fn common_prefix() {
let input = ["conduwuit", "conduit", "construct"];
let output = super::common_prefix(&input);
assert_eq!(output, "con");
}
#[test]
fn common_prefix_empty() {
let input = ["abcdefg", "hijklmn", "opqrstu"];
let output = super::common_prefix(&input);
assert_eq!(output, "");
}
#[test]
fn common_prefix_none() {
let input = [];
let output = super::common_prefix(&input);
assert_eq!(output, "");
}
#[test]
fn camel_to_snake_case_0() {
let res = super::camel_to_snake_string("CamelToSnakeCase");
assert_eq!(res, "camel_to_snake_case");
}
#[test]
fn camel_to_snake_case_1() {
let res = super::camel_to_snake_string("CAmelTOSnakeCase");
assert_eq!(res, "camel_tosnake_case");
}
#[test]
fn unquote() {
use super::Unquote;
assert_eq!("\"foo\"".unquote(), Some("foo"));
assert_eq!("\"foo".unquote(), None);
assert_eq!("foo".unquote(), None);
}
#[test]
fn unquote_infallible() {
use super::Unquote;
assert_eq!("\"foo\"".unquote_infallible(), "foo");
assert_eq!("\"foo".unquote_infallible(), "\"foo");
assert_eq!("foo".unquote_infallible(), "foo");
}
#[test]
fn between() {
use super::Between;
assert_eq!("\"foo\"".between(("\"", "\"")), Some("foo"));
assert_eq!("\"foo".between(("\"", "\"")), None);
assert_eq!("foo".between(("\"", "\"")), None);
}
#[test]
fn between_infallible() {
use super::Between;
assert_eq!("\"foo\"".between_infallible(("\"", "\"")), "foo");
assert_eq!("\"foo".between_infallible(("\"", "\"")), "\"foo");
assert_eq!("foo".between_infallible(("\"", "\"")), "foo");
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/string/between.rs | crates/server/src/utils/string/between.rs | type Delim<'a> = (&'a str, &'a str);
/// Slice a string between a pair of delimiters.
pub trait Between<'a> {
/// Extract a string between the delimiters. If the delimiters were not
/// found None is returned, otherwise the first extraction is returned.
fn between(&self, delim: Delim<'_>) -> Option<&'a str>;
/// Extract a string between the delimiters. If the delimiters were not
/// found the original string is returned; take note of this behavior,
/// if an empty slice is desired for this case use the fallible version and
/// unwrap to EMPTY.
fn between_infallible(&self, delim: Delim<'_>) -> &'a str;
}
impl<'a> Between<'a> for &'a str {
    #[inline]
    fn between_infallible(&self, delim: Delim<'_>) -> &'a str {
        // Fall back to the whole input when either delimiter is missing.
        match self.between(delim) {
            Some(inner) => inner,
            None => self,
        }
    }
    #[inline]
    fn between(&self, delim: Delim<'_>) -> Option<&'a str> {
        // Text after the first opening delimiter, up to the last closing one.
        let (_, after_open) = self.split_once(delim.0)?;
        let (inner, _) = after_open.rsplit_once(delim.1)?;
        Some(inner)
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/string/unquoted.rs | crates/server/src/utils/string/unquoted.rs | use std::ops::Deref;
use serde::{Deserialize, Deserializer, de};
use super::Unquote;
use crate::AppError;
/// Unquoted string which deserialized from a quoted string. Construction from a
/// &str is infallible such that the input can already be unquoted. Construction
/// from serde deserialization is fallible and the input must be quoted.
#[repr(transparent)]
pub struct Unquoted(str);
impl<'a> Unquoted {
#[inline]
#[must_use]
pub fn as_str(&'a self) -> &'a str {
&self.0
}
}
impl<'a, 'de: 'a> Deserialize<'de> for &'a Unquoted {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = <&'a str>::deserialize(deserializer)?;
s.is_quoted()
.then_some(s)
.ok_or(AppError::public("expected quoted string"))
.map_err(de::Error::custom)
.map(Into::into)
}
}
impl<'a> From<&'a str> for &'a Unquoted {
fn from(s: &'a str) -> &'a Unquoted {
let s: &'a str = s.unquote_infallible();
//SAFETY: This is a pattern I lifted from ruma-identifiers for strong-type strs
// by wrapping in a tuple-struct.
#[allow(clippy::transmute_ptr_to_ptr)]
unsafe {
std::mem::transmute(s)
}
}
}
impl Deref for Unquoted {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<'a> AsRef<str> for &'a Unquoted {
fn as_ref(&self) -> &'a str {
&self.0
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/string/unquote.rs | crates/server/src/utils/string/unquote.rs | const QUOTE: char = '"';
/// Slice a string between quotes
pub trait Unquote<'a> {
    /// Whether the input is quoted. If this is false the fallible methods of
    /// this interface will fail.
    fn is_quoted(&self) -> bool;
    /// Unquotes a string. The input must be quoted on each side for Some to
    /// be returned.
    fn unquote(&self) -> Option<&'a str>;
    /// Unquotes a string. A fully quoted input has both quotes removed; an
    /// unquoted input is returned as-is. For partially quoted input the
    /// result depends on the implementation (see the `&str` impl and the
    /// tests for the exact behavior).
    fn unquote_infallible(&self) -> &'a str;
}
impl<'a> Unquote<'a> for &'a str {
    #[inline]
    fn unquote_infallible(&self) -> &'a str {
        // Strip a leading quote if present, then a trailing one; note the
        // final fallback is the ORIGINAL string, matching the prior chained
        // `unwrap_or(self)` behavior.
        let without_prefix = self.strip_prefix(QUOTE).unwrap_or(self);
        without_prefix.strip_suffix(QUOTE).unwrap_or(self)
    }
    #[inline]
    fn unquote(&self) -> Option<&'a str> {
        // Both quotes must be present for Some.
        let inner = self.strip_prefix(QUOTE)?;
        inner.strip_suffix(QUOTE)
    }
    #[inline]
    fn is_quoted(&self) -> bool {
        self.starts_with(QUOTE) && self.ends_with(QUOTE)
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/string/split.rs | crates/server/src/utils/string/split.rs | use super::EMPTY;
type Pair<'a> = (&'a str, &'a str);
/// Split a string with default behaviors on non-match.
pub trait SplitInfallible<'a> {
/// Split a string at the first occurrence of delim. If not found, the
/// entire string is returned in \[0\], while \[1\] is empty.
fn split_once_infallible(&self, delim: &str) -> Pair<'a>;
/// Split a string from the last occurrence of delim. If not found, the
/// entire string is returned in \[0\], while \[1\] is empty.
fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a>;
}
impl<'a> SplitInfallible<'a> for &'a str {
    #[inline]
    fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a> {
        // No delimiter: whole input in [0], empty [1].
        match self.rsplit_once(delim) {
            Some(pair) => pair,
            None => (self, EMPTY),
        }
    }
    #[inline]
    fn split_once_infallible(&self, delim: &str) -> Pair<'a> {
        match self.split_once(delim) {
            Some(pair) => pair,
            None => (self, EMPTY),
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/utils/sys/compute.rs | crates/server/src/utils/sys/compute.rs | //! System utilities related to compute/processing
use std::{path::PathBuf, sync::LazyLock};
type Id = usize;
type Mask = u128;
type Masks = [Mask; MASK_BITS];
const MASK_BITS: usize = 128;
/// The mask of logical cores available to the process (at startup).
static CORES_AVAILABLE: LazyLock<Mask> = LazyLock::new(|| into_mask(query_cores_available()));
/// Stores the mask of logical-cores with thread/HT/SMT association. Each group
/// here makes up a physical-core.
static SMT_TOPOLOGY: LazyLock<Masks> = LazyLock::new(init_smt_topology);
/// Stores the mask of logical-core associations on a node/socket. Bits are set
/// for all logical cores within all physical cores of the node.
static NODE_TOPOLOGY: LazyLock<Masks> = LazyLock::new(init_node_topology);
/// Get the number of threads which could execute in parallel based on hardware
/// constraints of this system.
#[inline]
#[must_use]
pub fn available_parallelism() -> usize {
cores_available().count()
}
/// Gets the ID of the nth core available. This bijects our sequence of cores to
/// actual ID's which may have gaps for cores which are not available.
#[inline]
#[must_use]
pub fn nth_core_available(i: usize) -> Option<Id> {
cores_available().nth(i)
}
/// Determine if core (by id) is available to the process.
#[inline]
#[must_use]
pub fn is_core_available(id: Id) -> bool {
cores_available().any(|i| i == id)
}
/// Get the list of cores available. The values were recorded at program start.
#[inline]
pub fn cores_available() -> impl Iterator<Item = Id> {
from_mask(*CORES_AVAILABLE)
}
// #[cfg(target_os = "linux")]
// #[inline]
// pub fn getcpu() -> Result<usize> {
// use crate::{Error, utils::math};
// // SAFETY: This is part of an interface with many low-level calls taking many
// // raw params, but it's unclear why this specific call is unsafe. Nevertheless
// // the value obtained here is semantically unsafe because it can change on the
// // instruction boundary trailing its own acquisition and also any other time.
// let ret: i32 = unsafe { nix::libc::sched_getcpu() };
// #[cfg(target_os = "linux")]
// // SAFETY: On modern linux systems with a vdso if we can optimize away the branch checking
// // for error (see getcpu(2)) then this system call becomes a memory access.
// unsafe {
// std::hint::assert_unchecked(ret >= 0);
// };
// if ret == -1 {
// return Err(Error::from_errno());
// }
// math::try_into(ret)
// }
// #[cfg(not(target_os = "linux"))]
// #[inline]
// pub fn getcpu() -> Result<usize, IoError> {
// Err(IoError::new(ErrorKind::Unsupported, "not supported").into())
// }
fn query_cores_available() -> impl Iterator<Item = Id> {
core_affinity::get_core_ids()
.unwrap_or_default()
.into_iter()
.map(|core_id| core_id.id)
}
fn init_smt_topology() -> [Mask; MASK_BITS] {
[Mask::default(); MASK_BITS]
}
fn init_node_topology() -> [Mask; MASK_BITS] {
[Mask::default(); MASK_BITS]
}
/// Folds an iterator of core IDs into a bitmask with one bit set per ID.
fn into_mask<I>(ids: I) -> Mask
where
    I: Iterator<Item = Id>,
{
    let mut mask = Mask::default();
    for id in ids {
        // IDs at or beyond the mask width would shift out of range.
        debug_assert!(
            id < MASK_BITS,
            "Core ID must be < Mask::BITS at least for now"
        );
        mask |= 1 << id;
    }
    mask
}
/// Expands a bitmask back into an iterator of the core IDs whose bits are set.
fn from_mask(v: Mask) -> impl Iterator<Item = Id> {
    (0..MASK_BITS).filter(move |&i| (v & (1 << i)) != 0)
}
/// Builds the sysfs path for a per-CPU attribute, e.g.
/// `/sys/devices/system/cpu/cpu0/topology/core_id`.
fn _sys_path(id: usize, suffix: &str) -> PathBuf {
    PathBuf::from(format!("/sys/devices/system/cpu/cpu{id}/{suffix}"))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/batch_token.rs | crates/server/src/event/batch_token.rs | use std::str::FromStr;
use crate::MatrixError;
use crate::core::Seqnum;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BatchToken {
Live {
stream_ordering: Seqnum,
},
Historic {
stream_ordering: Seqnum,
topological_ordering: i64,
},
}
impl BatchToken {
    /// A live token at the given stream position.
    pub fn new_live(stream_ordering: Seqnum) -> Self {
        Self::Live { stream_ordering }
    }
    /// A historic token carrying both stream and topological positions.
    pub fn new_historic(stream_ordering: Seqnum, topological_ordering: i64) -> Self {
        Self::Historic {
            stream_ordering,
            topological_ordering,
        }
    }
    /// The event sequence number behind this token.
    /// NOTE(review): uses abs(), implying stream orderings can be negative
    /// (presumably marking backfilled events) — confirm against the writer.
    pub fn event_sn(&self) -> Seqnum {
        match self {
            BatchToken::Live { stream_ordering } => stream_ordering.abs(),
            BatchToken::Historic {
                stream_ordering, ..
            } => stream_ordering.abs(),
        }
    }
    /// The raw (possibly signed) stream ordering of either variant.
    pub fn stream_ordering(&self) -> Seqnum {
        match self {
            BatchToken::Live { stream_ordering } => *stream_ordering,
            BatchToken::Historic {
                stream_ordering, ..
            } => *stream_ordering,
        }
    }
    /// Topological ordering (event depth); only historic tokens carry one.
    pub fn topological_ordering(&self) -> Option<i64> {
        match self {
            BatchToken::Live { .. } => None,
            BatchToken::Historic {
                topological_ordering,
                ..
            } => Some(*topological_ordering),
        }
    }
    /// Smallest possible live token ("s0").
    pub const LIVE_MIN: Self = Self::Live { stream_ordering: 0 };
    /// Largest possible live token.
    pub const LIVE_MAX: Self = Self::Live {
        stream_ordering: Seqnum::MAX,
    };
}
// Live tokens start with an "s" followed by the `stream_ordering` of the event
// that comes before the position of the token. Said another way:
// `stream_ordering` uniquely identifies a persisted event. The live token
// means "the position just after the event identified by `stream_ordering`".
// An example token is:
// s2633508
// ---
// Historic tokens start with a "t" followed by the `depth`
// (`topological_ordering` in the event graph) of the event that comes before
// the position of the token, followed by "-", followed by the
// `stream_ordering` of the event that comes before the position of the token.
// An example token is:
// t426-2633508
// ---
impl FromStr for BatchToken {
    type Err = MatrixError;
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        // Live token: "s<stream_ordering>".
        if let Some(rest) = input.strip_prefix('s') {
            let stream_ordering: Seqnum = rest.parse().map_err(|_| {
                MatrixError::invalid_param("invalid batch token: cannot parse stream ordering")
            })?;
            return Ok(BatchToken::Live { stream_ordering });
        }
        let Some(rest) = input.strip_prefix('t') else {
            return Err(MatrixError::invalid_param(
                "invalid batch token: must start with 's' or 't'",
            ));
        };
        // Historic token: "t<topological_ordering>-<stream_ordering>".
        let Some((topo_part, stream_part)) = rest.split_once('-') else {
            return Err(MatrixError::invalid_param(
                "invalid batch token: missing '-' separator",
            ));
        };
        let topological_ordering: i64 = topo_part.parse().map_err(|_| {
            MatrixError::invalid_param("invalid batch token: cannot parse topological ordering")
        })?;
        let stream_ordering: Seqnum = stream_part.parse().map_err(|_| {
            MatrixError::invalid_param("invalid batch token: cannot parse stream ordering")
        })?;
        Ok(BatchToken::Historic {
            stream_ordering,
            topological_ordering,
        })
    }
}
impl std::fmt::Display for BatchToken {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            // Live: "s<stream_ordering>".
            BatchToken::Live { stream_ordering } => write!(f, "s{stream_ordering}"),
            // Historic: "t<topological_ordering>-<stream_ordering>".
            BatchToken::Historic {
                stream_ordering,
                topological_ordering,
            } => write!(f, "t{topological_ordering}-{stream_ordering}"),
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/pdu.rs | crates/server/src/event/pdu.rs | use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::ops::{Deref, DerefMut};
use diesel::prelude::*;
use serde::{Deserialize, Serialize};
use serde_json::{json, value::to_raw_value};
use ulid::Ulid;
use crate::core::client::filter::RoomEventFilter;
use crate::core::events::room::history_visibility::{
HistoryVisibility, RoomHistoryVisibilityEventContent,
};
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::events::room::redaction::RoomRedactionEventContent;
use crate::core::events::space::child::HierarchySpaceChildEvent;
use crate::core::events::{
AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent,
AnySyncTimelineEvent, AnyTimelineEvent, MessageLikeEventContent, StateEvent, StateEventContent,
StateEventType, TimelineEventType,
};
use crate::core::identifiers::*;
use crate::core::room_version_rules::RoomIdFormatVersion;
use crate::core::serde::to_canonical_object;
use crate::core::serde::{
CanonicalJsonObject, CanonicalJsonValue, JsonValue, RawJson, RawJsonValue, default_false,
};
use crate::core::serde::{to_canonical_value, validate_canonical_json};
use crate::core::state::{StateError, event_auth};
use crate::core::{Seqnum, UnixMillis, UserId};
use crate::data::room::{DbEventData, NewDbEvent};
use crate::data::{connect, schema::*};
use crate::event::{BatchToken, SeqnumQueueGuard};
use crate::room::timeline::get_pdu;
use crate::room::{get_state, state};
use crate::{AppError, AppResult, MatrixError, RoomMutexGuard, room};
/// Content hashes of a PDU.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct EventHash {
    /// The SHA-256 hash.
    pub sha256: String,
}
/// A [`PduEvent`] paired with its local sequence number and server-local
/// processing flags (none of which are serialized onto the wire).
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct SnPduEvent {
    /// The underlying PDU; flattened so (de)serialization matches `PduEvent`.
    #[serde(flatten)]
    pub pdu: PduEvent,
    /// Local sequence number assigned by this server (never serialized out).
    #[serde(skip_serializing)]
    pub event_sn: Seqnum,
    /// Whether the event was stored as an outlier (not part of the timeline).
    #[serde(skip, default)]
    pub is_outlier: bool,
    /// Whether the event soft-failed auth against the current room state.
    #[serde(skip, default = "default_false")]
    pub soft_failed: bool,
    /// Whether the event arrived via backfill rather than live federation.
    #[serde(skip, default = "default_false")]
    pub is_backfill: bool,
}
impl SnPduEvent {
    /// Bundles a PDU with its local sequence number and processing flags
    /// (see the field docs on [`SnPduEvent`]).
    pub fn new(
        pdu: PduEvent,
        event_sn: Seqnum,
        is_outlier: bool,
        soft_failed: bool,
        is_backfill: bool,
    ) -> Self {
        Self {
            pdu,
            event_sn,
            is_outlier,
            soft_failed,
            is_backfill,
        }
    }
    /// Decides whether `user_id` is allowed to see this event, applying the
    /// room's `m.room.history_visibility` at the event's state frame.
    /// Results are memoised per `(user, frame_id)` in `USER_VISIBILITY_CACHE`.
    pub fn user_can_see(&self, user_id: &UserId) -> AppResult<bool> {
        // A user can always see their own membership events.
        if self.event_ty == TimelineEventType::RoomMember
            && self.state_key.as_deref() == Some(user_id.as_str())
        {
            return Ok(true);
        }
        // Room state events are visible to joined users, and (unless banned)
        // to anyone in world-readable rooms.
        if self.is_room_state() {
            if room::is_world_readable(&self.room_id) {
                return Ok(!room::user::is_banned(user_id, &self.room_id)?);
            } else if room::user::is_joined(user_id, &self.room_id)? {
                return Ok(true);
            }
        }
        // Resolve the state frame at this event; fall back to the room's
        // current frame, and deny when neither can be resolved.
        let frame_id = match state::get_pdu_frame_id(&self.event_id) {
            Ok(frame_id) => frame_id,
            Err(_) => match state::get_room_frame_id(&self.room_id, None) {
                Ok(frame_id) => frame_id,
                Err(_) => {
                    return Ok(false);
                }
            },
        };
        // Cached verdict for this (user, frame) pair, if any.
        if let Some(visibility) = state::USER_VISIBILITY_CACHE
            .lock()
            .unwrap()
            .get_mut(&(user_id.to_owned(), frame_id))
        {
            return Ok(*visibility);
        }
        // Per the spec, a missing history_visibility event defaults to Shared.
        let history_visibility = state::get_state_content::<RoomHistoryVisibilityEventContent>(
            frame_id,
            &StateEventType::RoomHistoryVisibility,
            "",
        )
        .map_or(
            HistoryVisibility::Shared,
            |c: RoomHistoryVisibilityEventContent| c.history_visibility,
        );
        let visibility = match history_visibility {
            HistoryVisibility::WorldReadable => true,
            HistoryVisibility::Shared => {
                // Visible if the user was joined at the frame, or is joined now.
                let Ok(membership) = state::user_membership(frame_id, user_id) else {
                    return crate::room::user::is_joined(user_id, &self.room_id);
                };
                membership == MembershipState::Join
                    || crate::room::user::is_joined(user_id, &self.room_id)?
            }
            HistoryVisibility::Invited => {
                // Allow if the user was AT LEAST invited at the frame, else deny.
                state::user_was_invited(frame_id, user_id)
            }
            HistoryVisibility::Joined => {
                // Allow if the user was joined at this frame or the previous
                // one (so the join event itself stays visible), else deny.
                state::user_was_joined(frame_id, user_id)
                    || state::user_was_joined(frame_id - 1, user_id)
            }
            _ => {
                error!("unknown history visibility {history_visibility}");
                false
            }
        };
        // Memoise for subsequent checks against the same frame.
        state::USER_VISIBILITY_CACHE
            .lock()
            .expect("should locked")
            .insert((user_id.to_owned(), frame_id), visibility);
        Ok(visibility)
    }
pub fn add_unsigned_membership(&mut self, user_id: &UserId) -> AppResult<()> {
#[derive(Deserialize)]
struct ExtractMemebership {
membership: String,
}
let membership = if self.event_ty == TimelineEventType::RoomMember
&& self.state_key == Some(user_id.to_string())
{
self.get_content::<ExtractMemebership>()
.map(|m| m.membership)
.ok()
} else if let Ok(frame_id) = crate::event::get_frame_id(&self.room_id, self.event_sn) {
state::user_membership(frame_id, user_id)
.ok()
.map(|m| m.to_string())
} else {
None
};
if let Some(membership) = membership {
self.unsigned.insert(
"membership".to_owned(),
to_raw_value(&membership).expect("should always work"),
);
} else {
self.unsigned.insert(
"membership".to_owned(),
to_raw_value("leave").expect("should always work"),
);
}
Ok(())
}
pub fn from_canonical_object(
room_id: &RoomId,
event_id: &EventId,
event_sn: Seqnum,
json: CanonicalJsonObject,
is_outlier: bool,
soft_failed: bool,
is_backfill: bool,
) -> Result<Self, serde_json::Error> {
let pdu = PduEvent::from_canonical_object(room_id, event_id, json)?;
Ok(Self::new(
pdu,
event_sn,
is_outlier,
soft_failed,
is_backfill,
))
}
pub fn from_json_value(
room_id: &RoomId,
event_id: &EventId,
event_sn: Seqnum,
json: JsonValue,
is_outlier: bool,
soft_failed: bool,
is_backfill: bool,
) -> AppResult<Self> {
let pdu = PduEvent::from_json_value(room_id, event_id, json)?;
Ok(Self::new(
pdu,
event_sn,
is_outlier,
soft_failed,
is_backfill,
))
}
pub fn into_inner(self) -> PduEvent {
self.pdu
}
pub fn live_token(&self) -> BatchToken {
BatchToken::Live {
stream_ordering: self.event_sn,
}
}
pub fn historic_token(&self) -> BatchToken {
BatchToken::Historic {
stream_ordering: if self.is_backfill {
-self.event_sn
} else {
self.event_sn
},
topological_ordering: self.depth as i64,
}
}
pub fn prev_historic_token(&self) -> BatchToken {
BatchToken::Historic {
stream_ordering: if self.is_backfill {
-self.event_sn - 1
} else {
self.event_sn - 1
},
topological_ordering: self.depth as i64,
}
}
}
// Borrowing/deref conveniences: an `SnPduEvent` can be used anywhere a
// `&PduEvent` (or `&mut PduEvent`) is expected, so callers reach the inner
// PDU's fields and methods transparently.
impl AsRef<PduEvent> for SnPduEvent {
    fn as_ref(&self) -> &PduEvent {
        &self.pdu
    }
}
impl AsMut<PduEvent> for SnPduEvent {
    fn as_mut(&mut self) -> &mut PduEvent {
        &mut self.pdu
    }
}
impl DerefMut for SnPduEvent {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.pdu
    }
}
impl Deref for SnPduEvent {
    type Target = PduEvent;
    fn deref(&self) -> &Self::Target {
        &self.pdu
    }
}
// impl TryFrom<(PduEvent, Option<Seqnum>)> for SnPduEvent {
// type Error = AppError;
// fn try_from((pdu, event_sn): (PduEvent, Option<Seqnum>)) -> Result<Self, Self::Error> {
// if let Some(sn) = event_sn {
// Ok(SnPduEvent::new(pdu, sn))
// } else {
// Err(AppError::internal(
// "Cannot convert PDU without event_sn to SnPduEvent.",
// ))
// }
// }
// }
// State-resolution view of an `SnPduEvent`: every accessor simply forwards to
// the inner PDU (via `Deref`), except `rejected`, which explicitly delegates.
impl crate::core::state::Event for SnPduEvent {
    type Id = OwnedEventId;
    fn event_id(&self) -> &Self::Id {
        &self.event_id
    }
    fn room_id(&self) -> &RoomId {
        &self.room_id
    }
    fn sender(&self) -> &UserId {
        &self.sender
    }
    fn event_type(&self) -> &TimelineEventType {
        &self.event_ty
    }
    fn content(&self) -> &RawJsonValue {
        &self.content
    }
    fn origin_server_ts(&self) -> UnixMillis {
        self.origin_server_ts
    }
    fn state_key(&self) -> Option<&str> {
        self.state_key.as_deref()
    }
    fn prev_events(&self) -> &[Self::Id] {
        self.prev_events.deref()
    }
    fn auth_events(&self) -> &[Self::Id] {
        self.auth_events.deref()
    }
    fn redacts(&self) -> Option<&Self::Id> {
        self.redacts.as_ref()
    }
    fn rejected(&self) -> bool {
        self.pdu.rejected()
    }
}
// These impls allow us to dedup state snapshots when resolving state
// for incoming events (federation/send/{txn}).
// Equality and ordering are defined purely by `event_id`; all other fields
// (including local flags) are ignored.
impl Eq for SnPduEvent {}
impl PartialEq for SnPduEvent {
    fn eq(&self, other: &Self) -> bool {
        self.event_id == other.event_id
    }
}
impl PartialOrd for SnPduEvent {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two orderings can never disagree.
        Some(self.cmp(other))
    }
}
impl Ord for SnPduEvent {
    fn cmp(&self, other: &Self) -> Ordering {
        self.event_id.cmp(&other.event_id)
    }
}
/// A persisted Matrix PDU (room event) in the server's internal wire-adjacent
/// form. Field names mirror the federation event format; `extra_data` captures
/// any top-level keys not modeled explicitly.
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct PduEvent {
    /// The event's globally unique id.
    pub event_id: OwnedEventId,
    /// The event type; serialized as the spec's `type` key.
    #[serde(rename = "type")]
    pub event_ty: TimelineEventType,
    /// Room the event belongs to.
    pub room_id: OwnedRoomId,
    /// User that sent the event.
    pub sender: OwnedUserId,
    /// Origin server's timestamp in milliseconds.
    pub origin_server_ts: UnixMillis,
    /// Raw, unparsed event content.
    pub content: Box<RawJsonValue>,
    /// Present only for state events ("" for room-wide state).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state_key: Option<String>,
    /// Ids of the immediately preceding events in the DAG.
    #[serde(default)]
    pub prev_events: Vec<OwnedEventId>,
    /// Topological depth: max depth of `prev_events` plus one.
    pub depth: u64,
    /// Ids of the events authorizing this one.
    #[serde(default)]
    pub auth_events: Vec<OwnedEventId>,
    /// Target of an `m.room.redaction` event (top-level form, room v1-v10).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub redacts: Option<OwnedEventId>,
    /// Server-added metadata (`age`, `membership`, `redacted_because`, ...).
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub unsigned: BTreeMap<String, Box<RawJsonValue>>,
    /// Content hash of the event.
    pub hashes: EventHash,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub signatures: Option<Box<RawJsonValue>>, // BTreeMap<Box<ServerName>, BTreeMap<ServerSigningKeyId, String>>
    /// Any remaining top-level keys, preserved verbatim.
    #[serde(default, flatten, skip_serializing_if = "BTreeMap::is_empty")]
    pub extra_data: BTreeMap<String, JsonValue>,
    /// Local-only: why this event was rejected, if it was (never serialized).
    #[serde(skip, default)]
    pub rejection_reason: Option<String>,
}
impl PduEvent {
    /// Redacts this event in place: strips `content` down to the keys the
    /// redaction algorithm allows for its type, clears `unsigned`, and records
    /// the redaction event under `unsigned.redacted_because`.
    // NOTE(review): the keep-lists here match the redaction algorithm of
    // pre-v11 room versions (e.g. `creator` kept on m.room.create) — confirm
    // newer room versions are handled elsewhere.
    #[tracing::instrument]
    pub fn redact(&mut self, reason: &PduEvent) -> AppResult<()> {
        // Content keys that survive redaction, per event type.
        let allowed: &[&str] = match self.event_ty {
            TimelineEventType::RoomMember => &["join_authorised_via_users_server", "membership"],
            TimelineEventType::RoomCreate => &["creator"],
            TimelineEventType::RoomJoinRules => &["join_rule"],
            TimelineEventType::RoomPowerLevels => &[
                "ban",
                "events",
                "events_default",
                "kick",
                "redact",
                "state_default",
                "users",
                "users_default",
            ],
            TimelineEventType::RoomHistoryVisibility => &["history_visibility"],
            _ => &[],
        };
        let mut old_content = self
            .get_content::<BTreeMap<String, serde_json::Value>>()
            .map_err(|_| AppError::internal("PDU in db has invalid content."))?;
        let mut new_content = serde_json::Map::new();
        for key in allowed {
            if let Some(value) = old_content.remove(*key) {
                new_content.insert((*key).to_owned(), value);
            }
        }
        self.unsigned = BTreeMap::new();
        self.unsigned.insert(
            "redacted_because".to_owned(),
            to_raw_value(reason).expect("to_raw_value(PduEvent) always works"),
        );
        self.content = to_raw_value(&new_content).expect("to string always works");
        Ok(())
    }
    /// For an `m.room.redaction` event, returns the id of the event it
    /// redacts; `None` for any other event type.
    ///
    /// Room versions 1-10 carry the target in the top-level `redacts` field;
    /// later versions carry it inside `content.redacts`.
    pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option<OwnedEventId> {
        use RoomVersionId::*;
        if self.event_ty != TimelineEventType::RoomRedaction {
            return None;
        }
        match *room_version {
            V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(),
            _ => {
                self.get_content::<RoomRedactionEventContent>()
                    .ok()?
                    .redacts
            }
        }
    }
    /// Drops `unsigned.transaction_id` (it must only be shown to the sender's
    /// own device that submitted the transaction).
    pub fn remove_transaction_id(&mut self) -> AppResult<()> {
        self.unsigned.remove("transaction_id");
        Ok(())
    }
    /// Sets `unsigned.age` to the milliseconds elapsed since the event's
    /// origin timestamp (clamped at zero via saturating subtraction).
    pub fn add_age(&mut self) -> AppResult<()> {
        // Widen to i128 before subtracting to avoid any overflow concerns.
        let now: i128 = UnixMillis::now().get().into();
        let then: i128 = self.origin_server_ts.get().into();
        let age = now.saturating_sub(then);
        self.unsigned
            .insert("age".to_owned(), to_raw_value(&age).unwrap());
        Ok(())
    }
    /// Serializes as a sync-timeline event: no `room_id` (implied by the
    /// sync section it appears in); `unsigned`/`state_key`/`redacts` only
    /// when present.
    #[tracing::instrument]
    pub fn to_sync_room_event(&self) -> RawJson<AnySyncTimelineEvent> {
        let mut json = json!({
            "content": self.content,
            "type": self.event_ty,
            "event_id": *self.event_id,
            "sender": self.sender,
            "origin_server_ts": self.origin_server_ts,
        });
        if !self.unsigned.is_empty() {
            json["unsigned"] = json!(self.unsigned);
        }
        if let Some(state_key) = &self.state_key {
            json["state_key"] = json!(state_key);
        }
        if let Some(redacts) = &self.redacts {
            json["redacts"] = json!(redacts);
        }
        serde_json::from_value(json).expect("RawJson::from_value always works")
    }
    /// Serializes as a full room (timeline) event, including `room_id`.
    #[tracing::instrument]
    pub fn to_room_event(&self) -> RawJson<AnyTimelineEvent> {
        let mut data = json!({
            "content": self.content,
            "type": self.event_ty,
            "event_id": *self.event_id,
            "sender": self.sender,
            "origin_server_ts": self.origin_server_ts,
            "room_id": self.room_id,
        });
        if !self.unsigned.is_empty() {
            data["unsigned"] = json!(self.unsigned);
        }
        if let Some(state_key) = &self.state_key {
            data["state_key"] = json!(state_key);
        }
        if let Some(redacts) = &self.redacts {
            data["redacts"] = json!(redacts);
        }
        serde_json::from_value(data).expect("RawJson::from_value always works")
    }
    /// Serializes as a message-like event (same shape as [`Self::to_room_event`],
    /// typed for message-like consumers).
    #[tracing::instrument]
    pub fn to_message_like_event(&self) -> RawJson<AnyMessageLikeEvent> {
        let mut data = json!({
            "content": self.content,
            "type": self.event_ty,
            "event_id": *self.event_id,
            "sender": self.sender,
            "origin_server_ts": self.origin_server_ts,
            "room_id": self.room_id,
        });
        if !self.unsigned.is_empty() {
            data["unsigned"] = json!(self.unsigned);
        }
        if let Some(state_key) = &self.state_key {
            data["state_key"] = json!(state_key);
        }
        if let Some(redacts) = &self.redacts {
            data["redacts"] = json!(redacts);
        }
        serde_json::from_value(data).expect("RawJson::from_value always works")
    }
    /// Serializes as a state event (thin wrapper over
    /// [`Self::to_state_event_value`]).
    #[tracing::instrument]
    pub fn to_state_event(&self) -> RawJson<AnyStateEvent> {
        serde_json::from_value(self.to_state_event_value())
            .expect("RawJson::from_value always works")
    }
    /// Builds the JSON object for this event as a state event, merging in any
    /// `extra_data` keys that do not collide with the standard fields.
    #[tracing::instrument]
    pub fn to_state_event_value(&self) -> JsonValue {
        let JsonValue::Object(mut data) = json!({
            "content": self.content,
            "type": self.event_ty,
            "event_id": *self.event_id,
            "sender": self.sender,
            "origin_server_ts": self.origin_server_ts,
            "room_id": self.room_id,
            "state_key": self.state_key,
        }) else {
            // json! with an object literal always yields an Object.
            panic!("Invalid JSON value, never happened!");
        };
        if !self.unsigned.is_empty() {
            data.insert("unsigned".into(), json!(self.unsigned));
        }
        // Preserved unknown top-level keys; explicit fields take precedence.
        for (key, value) in &self.extra_data {
            if !data.contains_key(key) {
                data.insert(key.clone(), value.clone());
            }
        }
        JsonValue::Object(data)
    }
    /// Serializes as a sync state event (no `room_id`; `state_key` always
    /// included).
    #[tracing::instrument]
    pub fn to_sync_state_event(&self) -> RawJson<AnySyncStateEvent> {
        let mut data = json!({
            "content": self.content,
            "type": self.event_ty,
            "event_id": *self.event_id,
            "sender": self.sender,
            "origin_server_ts": self.origin_server_ts,
            "state_key": self.state_key,
        });
        if !self.unsigned.is_empty() {
            data["unsigned"] = json!(self.unsigned);
        }
        serde_json::from_value(data).expect("RawJson::from_value always works")
    }
    /// Serializes as a stripped state event (content/type/sender/state_key only).
    ///
    /// Exception: for `m.room.create` in room versions where the create
    /// event's id doubles as the room id, the full event is emitted so the
    /// recipient can derive the room id.
    #[tracing::instrument]
    pub fn to_stripped_state_event(&self) -> RawJson<AnyStrippedStateEvent> {
        if self.event_ty == TimelineEventType::RoomCreate {
            let version_rules = crate::room::get_version(&self.room_id)
                .and_then(|version| crate::room::get_version_rules(&version));
            if let Ok(version_rules) = version_rules
                && version_rules.authorization.room_create_event_id_as_room_id
            {
                return serde_json::from_value(json!(self))
                    .expect("RawJson::from_value always works");
            }
        }
        let data = json!({
            "content": self.content,
            "type": self.event_ty,
            "sender": self.sender,
            "state_key": self.state_key,
        });
        serde_json::from_value(data).expect("RawJson::from_value always works")
    }
    /// Serializes as a stripped `m.space.child` event for space-hierarchy
    /// responses (stripped shape plus `origin_server_ts`).
    #[tracing::instrument]
    pub fn to_stripped_space_child_event(&self) -> RawJson<HierarchySpaceChildEvent> {
        let data = json!({
            "content": self.content,
            "type": self.event_ty,
            "sender": self.sender,
            "state_key": self.state_key,
            "origin_server_ts": self.origin_server_ts,
        });
        serde_json::from_value(data).expect("RawJson::from_value always works")
    }
    /// Serializes as a typed `m.room.member` state event.
    #[tracing::instrument]
    pub fn to_member_event(&self) -> RawJson<StateEvent<RoomMemberEventContent>> {
        let mut data = json!({
            "content": self.content,
            "type": self.event_ty,
            "event_id": *self.event_id,
            "sender": self.sender,
            "origin_server_ts": self.origin_server_ts,
            "redacts": self.redacts,
            "room_id": self.room_id,
            "state_key": self.state_key,
        });
        if !self.unsigned.is_empty() {
            data["unsigned"] = json!(self.unsigned);
        }
        serde_json::from_value(data).expect("RawJson::from_value always works")
    }
    /// Deserializes a `PduEvent` from canonical JSON, first injecting the
    /// authoritative `room_id` and `event_id` (federation PDUs omit the id,
    /// and may omit the room id in some room versions).
    pub fn from_canonical_object(
        room_id: &RoomId,
        event_id: &EventId,
        mut json: CanonicalJsonObject,
    ) -> Result<Self, serde_json::Error> {
        json.insert("room_id".to_owned(), room_id.as_str().into());
        json.insert(
            "event_id".to_owned(),
            CanonicalJsonValue::String(event_id.as_str().to_owned()),
        );
        serde_json::from_value(serde_json::to_value(json).expect("valid JSON"))
    }
    /// Like [`Self::from_canonical_object`] but for an arbitrary JSON value;
    /// errors if `json` is not an object.
    pub fn from_json_value(
        room_id: &RoomId,
        event_id: &EventId,
        json: JsonValue,
    ) -> AppResult<Self> {
        if let JsonValue::Object(mut obj) = json {
            obj.insert("event_id".to_owned(), event_id.as_str().into());
            obj.insert("room_id".to_owned(), room_id.as_str().into());
            serde_json::from_value(serde_json::Value::Object(obj)).map_err(Into::into)
        } else {
            Err(AppError::public("invalid json value"))
        }
    }
    /// Deserializes the raw `content` into `T`.
    pub fn get_content<T>(&self) -> Result<T, serde_json::Error>
    where
        T: for<'de> Deserialize<'de>,
    {
        serde_json::from_str(self.content.get())
    }
    /// True for room-wide state events (state_key is the empty string).
    pub fn is_room_state(&self) -> bool {
        self.state_key.as_deref() == Some("")
    }
    /// True for per-user state events (state_key present and non-empty).
    pub fn is_user_state(&self) -> bool {
        self.state_key.is_some() && self.state_key.as_deref() != Some("")
    }
    /// Applies a client `RoomEventFilter` to this event: the `not_*` deny
    /// lists are checked first, then the optional allow lists. URL filtering
    /// is not yet implemented.
    pub fn can_pass_filter(&self, filter: &RoomEventFilter) -> bool {
        if filter.not_types.contains(&self.event_ty.to_string()) {
            return false;
        }
        if filter.not_rooms.contains(&self.room_id) {
            return false;
        }
        if filter.not_senders.contains(&self.sender) {
            return false;
        }
        if let Some(rooms) = &filter.rooms
            && !rooms.contains(&self.room_id)
        {
            return false;
        }
        if let Some(senders) = &filter.senders
            && !senders.contains(&self.sender)
        {
            return false;
        }
        if let Some(types) = &filter.types
            && !types.contains(&self.event_ty.to_string())
        {
            return false;
        }
        // TODO: url filter
        // if let Some(url_filter) = &filter.url_filter {
        //     match url_filter {
        //         UrlFilter::EventsWithUrl => if !self.events::contains_url.eq(true)),
        //         UrlFilter::EventsWithoutUrl => query = query.filter(events::contains_url.eq(false)),
        //     }
        // }
        true
    }
}
// State-resolution view of a `PduEvent`: straight field accessors; an event
// counts as rejected when a rejection reason was recorded.
impl crate::core::state::Event for PduEvent {
    type Id = OwnedEventId;
    fn event_id(&self) -> &Self::Id {
        &self.event_id
    }
    fn room_id(&self) -> &RoomId {
        &self.room_id
    }
    fn sender(&self) -> &UserId {
        &self.sender
    }
    fn event_type(&self) -> &TimelineEventType {
        &self.event_ty
    }
    fn content(&self) -> &RawJsonValue {
        &self.content
    }
    fn origin_server_ts(&self) -> UnixMillis {
        self.origin_server_ts
    }
    fn state_key(&self) -> Option<&str> {
        self.state_key.as_deref()
    }
    fn prev_events(&self) -> &[Self::Id] {
        self.prev_events.deref()
    }
    fn auth_events(&self) -> &[Self::Id] {
        self.auth_events.deref()
    }
    fn redacts(&self) -> Option<&Self::Id> {
        self.redacts.as_ref()
    }
    fn rejected(&self) -> bool {
        self.rejection_reason.is_some()
    }
}
// These impls allow us to dedup state snapshots when resolving state
// for incoming events (federation/send/{txn}).
// Equality and ordering are defined purely by `event_id`.
impl Eq for PduEvent {}
impl PartialEq for PduEvent {
    fn eq(&self, other: &Self) -> bool {
        self.event_id == other.event_id
    }
}
impl PartialOrd for PduEvent {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two orderings can never disagree.
        Some(self.cmp(other))
    }
}
impl Ord for PduEvent {
    fn cmp(&self, other: &Self) -> Ordering {
        self.event_id.cmp(&other.event_id)
    }
}
/// Build the start of a PDU in order to add it to the Database.
///
/// Only the caller-controlled fields are set here; `prev_events`, `depth`,
/// `auth_events`, hashes and signatures are filled in when the builder is
/// hashed and signed.
#[derive(Debug, Deserialize)]
pub struct PduBuilder {
    /// The event type; deserialized from the spec's `type` key.
    #[serde(rename = "type")]
    pub event_type: TimelineEventType,
    /// Raw event content.
    pub content: Box<RawJsonValue>,
    /// Initial `unsigned` entries (e.g. transaction id).
    #[serde(default)]
    pub unsigned: BTreeMap<String, Box<RawJsonValue>>,
    /// Present for state events ("" for room-wide state).
    pub state_key: Option<String>,
    /// Target event for an `m.room.redaction`.
    pub redacts: Option<OwnedEventId>,
    /// Optional explicit origin timestamp; defaults to "now" when unset.
    pub timestamp: Option<UnixMillis>,
}
impl PduBuilder {
/// Builds a state-event `PduBuilder` from typed state-event content; the
/// event type is taken from the content type itself.
pub fn state<T>(state_key: String, content: &T) -> Self
where
    T: StateEventContent,
{
    Self {
        event_type: content.event_type().into(),
        content: to_raw_value(content)
            .expect("builder failed to serialize state event content to RawValue"),
        state_key: Some(state_key),
        ..Self::default()
    }
}
/// Builds a message-like (timeline) `PduBuilder` from typed event content;
/// the event type is taken from the content type itself.
pub fn timeline<T>(content: &T) -> Self
where
    T: MessageLikeEventContent,
{
    Self {
        event_type: content.event_type().into(),
        content: to_raw_value(content)
            .expect("builder failed to serialize timeline event content to RawValue"),
        ..Self::default()
    }
}
/// Hashes and signs the event, then persists both its metadata row and its
/// full JSON, returning the stored [`SnPduEvent`], its canonical JSON, and
/// the sequence-number queue guard (if one was created).
///
/// `_state_lock` is unused directly but must be held by the caller to keep
/// room state consistent while the event is created.
// NOTE(review): the row is persisted with `is_outlier: true`; presumably this
// is flipped when the event is appended to the timeline — confirm.
pub async fn hash_sign_save(
    self,
    sender_id: &UserId,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    _state_lock: &RoomMutexGuard,
) -> AppResult<(SnPduEvent, CanonicalJsonObject, Option<SeqnumQueueGuard>)> {
    let (pdu, pdu_json) = self.hash_sign(sender_id, room_id, room_version).await?;
    // Allocate (or look up) the local sequence number for this event id.
    let (event_sn, event_guard) = crate::event::ensure_event_sn(room_id, &pdu.event_id)?;
    let content_value: JsonValue = serde_json::from_str(pdu.content.get())?;
    NewDbEvent {
        id: pdu.event_id.to_owned(),
        sn: event_sn,
        ty: pdu.event_ty.to_string(),
        room_id: pdu.room_id.to_owned(),
        unrecognized_keys: None,
        depth: pdu.depth as i64,
        topological_ordering: pdu.depth as i64,
        stream_ordering: event_sn,
        origin_server_ts: pdu.origin_server_ts,
        received_at: None,
        sender_id: Some(sender_id.to_owned()),
        // Used later by the client `url` event filter.
        contains_url: content_value.get("url").is_some(),
        worker_id: None,
        state_key: pdu.state_key.clone(),
        is_outlier: true,
        soft_failed: false,
        is_rejected: false,
        rejection_reason: None,
    }
    .save()?;
    // Persist the full canonical JSON alongside the metadata row.
    DbEventData {
        event_id: pdu.event_id.clone(),
        event_sn,
        room_id: pdu.room_id.to_owned(),
        internal_metadata: None,
        json_data: serde_json::to_value(&pdu_json)?,
        format_version: None,
    }
    .save()?;
    Ok((
        SnPduEvent {
            pdu,
            event_sn,
            is_outlier: true,
            soft_failed: false,
            is_backfill: false,
        },
        pdu_json,
        event_guard,
    ))
}
pub async fn hash_sign(
self,
sender_id: &UserId,
room_id: &RoomId,
room_version: &RoomVersionId,
) -> AppResult<(PduEvent, CanonicalJsonObject)> {
let PduBuilder {
event_type,
content,
mut unsigned,
state_key,
redacts,
timestamp,
..
} = self;
let prev_events: Vec<_> = state::get_forward_extremities(room_id)?
.into_iter()
.take(20)
.collect();
let conf = crate::config::get();
// If there was no create event yet, assume we are creating a room with the default
// version right now
// let room_version = if let Ok(room_version) = super::get_version(room_id) {
// room_version
// } else if event_type == TimelineEventType::RoomCreate {
// let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
// content.room_version
// } else {
// return Err(AppError::public(format!(
// "non-create event for room `{room_id}` of unknown version"
// )));
// };
let version_rules = crate::room::get_version_rules(room_version)?;
let auth_rules = &version_rules.authorization;
let auth_events = state::get_auth_events(
room_id,
&event_type,
sender_id,
state_key.as_deref(),
&content,
auth_rules,
)?;
// Our depth is the maximum depth of prev_events + 1
let depth = prev_events
.iter()
.filter_map(|event_id| Some(get_pdu(event_id).ok()?.depth))
.max()
.unwrap_or(0)
+ 1;
if let Some(state_key) = &state_key
&& let Ok(prev_pdu) =
crate::room::get_state(room_id, &event_type.to_string().into(), state_key, None)
{
unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone());
unsigned.insert(
"prev_sender".to_owned(),
to_raw_value(&prev_pdu.sender).expect("UserId::to_value always works"),
);
unsigned.insert(
"replaces_state".to_owned(),
to_raw_value(&prev_pdu.event_id).expect("EventId is valid json"),
);
}
let temp_event_id =
OwnedEventId::try_from(format!("$backfill_{}", Ulid::new().to_string())).unwrap();
let mut pdu = PduEvent {
event_id: temp_event_id.clone(),
event_ty: event_type,
room_id: room_id.to_owned(),
sender: sender_id.to_owned(),
origin_server_ts: timestamp.unwrap_or_else(UnixMillis::now),
content,
state_key,
prev_events,
depth,
auth_events: auth_events
.values()
.map(|pdu| pdu.event_id.clone())
.collect(),
redacts,
unsigned,
hashes: EventHash {
sha256: "aaa".to_owned(),
},
signatures: None,
extra_data: Default::default(),
rejection_reason: None,
};
let fetch_event = async |event_id: OwnedEventId| {
get_pdu(&event_id)
.map(|s| s.pdu)
.map_err(|_| StateError::other("missing PDU 6"))
};
let fetch_state = async |k: StateEventType, s: String| {
if let Some(pdu) = auth_events
.get(&(k.clone(), s.to_owned()))
.map(|s| s.pdu.clone())
{
return Ok(pdu);
}
if auth_rules.room_create_event_id_as_room_id && k == StateEventType::RoomCreate {
let pdu = crate::room::get_create(room_id)
.map_err(|_| StateError::other("missing create event"))?
.into_inner();
if pdu.room_id != *room_id {
Err(StateError::other("mismatched room id in create event"))
} else {
Ok(pdu.into_inner())
}
} else {
Err(StateError::other(format!(
"failed hash and sigin event, missing state event, event_type: {k}, state_key:{s}"
)))
}
};
event_auth::auth_check(auth_rules, &pdu, &fetch_event, &fetch_state).await?;
// Hash and sign
let mut pdu_json = to_canonical_object(&pdu).expect("event is valid, we just created it");
pdu_json.remove("event_id");
if version_rules.room_id_format == RoomIdFormatVersion::V2
&& pdu.event_ty == TimelineEventType::RoomCreate
{
pdu_json.remove("room_id");
}
// Add origin because synapse likes that (and it's required in the spec)
pdu_json.insert(
"origin".to_owned(),
to_canonical_value(&conf.server_name)
.expect("server name is a valid CanonicalJsonValue"),
);
match crate::server_key::hash_and_sign_event(&mut pdu_json, room_version) {
Ok(_) => {}
Err(e) => {
return match e {
AppError::Signatures(crate::core::signatures::Error::PduSize) => {
Err(MatrixError::too_large("message is too long").into())
}
_ => Err(MatrixError::unknown("signing event failed").into()),
};
}
}
// Generate event id
pdu.event_id = crate::event::gen_event_id(&pdu_json, room_version)?;
if version_rules.room_id_format == RoomIdFormatVersion::V2
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | true |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/search.rs | crates/server/src/event/search.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use palpo_core::Seqnum;
use crate::core::client::search::{
Criteria, EventContextResult, OrderBy, ResultRoomEvents, SearchResult,
};
use crate::core::events::StateEventType;
use crate::core::events::TimelineEventType;
use crate::core::events::room::member::RoomMemberEventContent;
use crate::core::identifiers::*;
use crate::core::serde::CanonicalJsonObject;
use crate::core::serde::canonical_json::CanonicalJsonValue;
use crate::data::full_text_search::*;
use crate::data::schema::*;
use crate::data::{self, connect};
use crate::event::BatchToken;
use crate::room::{state, timeline};
use crate::{AppResult, MatrixError, SnPduEvent};
/// Full-text search over room events the user may see.
///
/// Searches the rooms named in the filter (default: all rooms the user has
/// joined), requiring current membership in every searched room. Results are
/// paginated with a `"{origin_server_ts}-{event_sn}"` token and each hit is
/// returned with surrounding context events.
pub fn search_pdus(
    user_id: &UserId,
    criteria: &Criteria,
    next_batch: Option<&str>,
) -> AppResult<ResultRoomEvents> {
    let filter = &criteria.filter;
    let room_ids = filter
        .rooms
        .clone()
        .unwrap_or_else(|| data::user::joined_rooms(user_id).unwrap_or_default());
    // Use limit or else 10, with maximum 100
    let limit = filter.limit.unwrap_or(10).min(100);
    // The user must currently be joined to every room being searched.
    for room_id in &room_ids {
        if !crate::room::user::is_joined(user_id, room_id)? {
            return Err(MatrixError::forbidden(
                "you don't have permission to view this room",
                None,
            )
            .into());
        }
    }
    // Postgres websearch-style full-text match over the indexed events.
    let base_query = event_searches::table
        .filter(event_searches::room_id.eq_any(&room_ids))
        .filter(event_searches::vector.matches(websearch_to_tsquery(&criteria.search_term)));
    let mut data_query = base_query.clone().into_boxed();
    // Resume from a "{ts}-{sn}" pagination token, if given.
    if let Some(mut next_batch) = next_batch.map(|nb| nb.split('-')) {
        let server_ts: i64 = next_batch.next().map(str::parse).transpose()?.unwrap_or(0);
        let event_sn: i64 = next_batch.next().map(str::parse).transpose()?.unwrap_or(0);
        data_query = data_query
            .filter(event_searches::origin_server_ts.le(server_ts))
            .filter(event_searches::event_sn.lt(event_sn));
    }
    let data_query = data_query
        .select((
            ts_rank_cd(
                event_searches::vector,
                websearch_to_tsquery(&criteria.search_term),
            ),
            // event_searches::room_id,
            event_searches::event_id,
            event_searches::event_sn,
            event_searches::origin_server_ts,
            // event_searches::stream_ordering,
        ))
        .limit(limit as i64);
    let items = if criteria.order_by == Some(OrderBy::Rank) {
        // ORDER BY the first selected column (the ts_rank_cd score).
        // NOTE(review): this sorts ascending, which puts the *least* relevant
        // results first — confirm whether DESC was intended.
        data_query
            .order_by(diesel::dsl::sql::<diesel::sql_types::Int8>("1"))
            .load::<(f32, OwnedEventId, i64, i64)>(&mut connect()?)?
    } else {
        // Default/Recent ordering: newest first, sequence number as tiebreak.
        data_query
            .order_by(event_searches::origin_server_ts.desc())
            .then_order_by(event_searches::event_sn.desc())
            .load::<(f32, OwnedEventId, i64, i64)>(&mut connect()?)?
    };
    // let _ids: Vec<i64> = event_searches::table
    //     .select(event_searches::id)
    //     .load(&mut connect()?)?;
    // Total match count, ignoring pagination.
    let count: i64 = base_query.count().first(&mut connect()?)?;
    // Only recency ordering supports pagination tokens (rank order has no
    // stable cursor); a short page means we reached the end.
    let next_batch = if items.len() < limit {
        None
    } else if let Some(last) = items.last() {
        if criteria.order_by == Some(OrderBy::Recent) || criteria.order_by.is_none() {
            Some(format!("{}-{}", last.3, last.2))
        } else {
            None
        }
    } else {
        None
    };
    // Drop hits the user is not allowed to see, then attach context.
    let results: Vec<_> = items
        .into_iter()
        .filter_map(|(rank, event_id, _, _)| {
            let pdu = timeline::get_pdu(&event_id).ok()?;
            if state::user_can_see_event(user_id, &pdu.event_id).unwrap_or(false) {
                Some((rank, pdu))
            } else {
                None
            }
        })
        .map(|(rank, pdu)| SearchResult {
            context: calc_event_context(user_id, &pdu.room_id, pdu.event_sn, 10, 10, false)
                .unwrap_or_default(),
            rank: Some(rank as f64),
            result: Some(pdu.to_room_event()),
        })
        .collect();
    Ok(ResultRoomEvents {
        count: Some(count as u64),
        groups: BTreeMap::new(), // TODO
        next_batch,
        results,
        state: BTreeMap::new(), // TODO
        // Lowercased alphanumeric tokens of the search term, for client-side
        // highlighting.
        highlights: criteria
            .search_term
            .split_terminator(|c: char| !c.is_alphanumeric())
            .map(str::to_lowercase)
            .collect(),
    })
}
// Calculates the contextual events for any search results.
// Calculates the contextual events for any search results.
//
// Loads up to `before_limit` events before and `after_limit` events after the
// event at `event_sn`, visibility-filtered for `user_id`. `start`/`end` are
// raw sequence-number pagination anchors taken from the loaded edges.
fn calc_event_context(
    user_id: &UserId,
    room_id: &RoomId,
    event_sn: Seqnum,
    before_limit: usize,
    after_limit: usize,
    include_profile: bool,
) -> AppResult<EventContextResult> {
    let before_pdus = timeline::stream::load_pdus_backward(
        Some(user_id),
        room_id,
        Some(BatchToken::new_live(event_sn - 1)),
        None,
        None,
        before_limit,
    )?;
    let after_pdus = timeline::stream::load_pdus_forward(
        Some(user_id),
        room_id,
        Some(BatchToken::new_live(event_sn + 1)),
        None,
        None,
        after_limit,
    )?;
    let mut profile = BTreeMap::new();
    // Optionally look up the sender's profile from the state frame at this
    // event.
    if include_profile && let Ok(frame_id) = crate::event::get_frame_id(room_id, event_sn) {
        let RoomMemberEventContent {
            display_name,
            avatar_url,
            ..
        } = state::get_state_content(frame_id, &StateEventType::RoomMember, user_id.as_str())?;
        if let Some(display_name) = display_name {
            profile.insert("displayname".to_string(), display_name);
        }
        if let Some(avatar_url) = avatar_url {
            profile.insert("avatar_url".to_string(), avatar_url.to_string());
        }
    }
    let context = EventContextResult {
        start: before_pdus.first().map(|(sn, _)| sn.to_string()),
        end: after_pdus.last().map(|(sn, _)| sn.to_string()),
        events_before: before_pdus
            .into_iter()
            .map(|(_, pdu)| pdu.to_room_event())
            .collect(),
        events_after: after_pdus
            .into_iter()
            .map(|(_, pdu)| pdu.to_room_event())
            .collect(),
        // BUG(review): `profile` is computed above but never used — an empty
        // map is returned regardless of `include_profile`. It should
        // presumably populate `profile_info`; fix once the field's expected
        // key/value types are confirmed.
        profile_info: BTreeMap::new(),
    };
    Ok(context)
}
/// Indexes a PDU for full-text search.
///
/// Only `m.room.name`, `m.room.topic` and `m.room.message` events carry
/// searchable text; all other event types (including redactions, still a
/// TODO) are silently skipped. The extracted text is stored as a Postgres
/// `tsvector`, upserting on `event_id`.
pub fn save_pdu(pdu: &SnPduEvent, pdu_json: &CanonicalJsonObject) -> AppResult<()> {
    let Some(CanonicalJsonValue::Object(content)) = pdu_json.get("content") else {
        return Ok(());
    };
    // Pick the searchable text (and the key it came from) per event type.
    let Some((key, vector)) = (match pdu.event_ty {
        TimelineEventType::RoomName => content
            .get("name")
            .and_then(|v| v.as_str())
            .map(|v| ("content.name", v)),
        TimelineEventType::RoomTopic => content
            .get("topic")
            .and_then(|v| v.as_str())
            .map(|v| ("content.topic", v)),
        TimelineEventType::RoomMessage => content
            .get("body")
            .and_then(|v| v.as_str())
            .map(|v| ("content.message", v)),
        TimelineEventType::RoomRedaction => {
            // TODO: Redaction
            return Ok(());
        }
        _ => {
            return Ok(());
        }
    }) else {
        return Ok(());
    };
    // The statement uses exactly seven placeholders ($1..$7). $6 and $7 are
    // *referenced* a second time in the ON CONFLICT clause, but a placeholder
    // reuse does not take an extra bind value: supplying more parameters than
    // the prepared statement declares makes Postgres reject the Bind message.
    // (Previously `vector` and `origin_server_ts` were bound twice.)
    diesel::sql_query("INSERT INTO event_searches (event_id, event_sn, room_id, sender_id, key, vector, origin_server_ts) VALUES ($1, $2, $3, $4, $5, to_tsvector('english', $6), $7) ON CONFLICT (event_id) DO UPDATE SET vector = to_tsvector('english', $6), origin_server_ts = $7")
        .bind::<diesel::sql_types::Text, _>(pdu.event_id.as_str())
        .bind::<diesel::sql_types::Nullable<diesel::sql_types::Int8>, _>(pdu.event_sn)
        .bind::<diesel::sql_types::Text, _>(&pdu.room_id)
        .bind::<diesel::sql_types::Text, _>(&pdu.sender)
        .bind::<diesel::sql_types::Text, _>(key)
        .bind::<diesel::sql_types::Text, _>(vector)
        .bind::<diesel::sql_types::Int8, _>(pdu.origin_server_ts)
        .execute(&mut connect()?)?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/resolver.rs | crates/server/src/event/resolver.rs | use std::collections::{BTreeSet, HashMap, HashSet};
use std::sync::Arc;
use diesel::prelude::*;
use indexmap::IndexMap;
use crate::core::identifiers::*;
use crate::core::room_version_rules::{RoomVersionRules, StateResolutionV2Rules};
use crate::core::state::{Event, StateError, StateMap, resolve};
use crate::data::connect;
use crate::data::schema::*;
use crate::event::PduEvent;
use crate::room::state::{CompressedState, DbRoomStateField};
use crate::room::{state, timeline};
use crate::utils::SeqnumQueueGuard;
use crate::{AppError, AppResult, room};
/// Resolves the room's current state against an incoming state snapshot and
/// returns the resolved state in compressed form, along with any
/// sequence-number queue guards created while compressing.
///
/// The two "forks" fed to the resolver are (a) the room's current full state
/// (empty if the room has no frame yet) and (b) `incoming_state`, both keyed
/// by state-field id.
pub async fn resolve_state(
    room_id: &RoomId,
    room_version_id: &RoomVersionId,
    incoming_state: IndexMap<i64, OwnedEventId>,
) -> AppResult<(Arc<CompressedState>, Vec<SeqnumQueueGuard>)> {
    debug!("loading current room state ids");
    let current_state_ids = if let Ok(current_frame_id) = crate::room::get_frame_id(room_id, None) {
        state::get_full_state_ids(current_frame_id)?
    } else {
        IndexMap::new()
    };
    debug!("loading fork states");
    let fork_states = [current_state_ids, incoming_state];
    // One auth chain per fork, as required by the resolution algorithm.
    let mut auth_chain_sets = Vec::new();
    for state in &fork_states {
        auth_chain_sets.push(crate::room::auth_chain::get_auth_chain_ids(
            room_id,
            state.values().map(|e| &**e),
        )?);
    }
    // Translate field ids back into (event_type, state_key) pairs; entries
    // whose field cannot be loaded are silently dropped.
    let fork_states: Vec<_> = fork_states
        .into_iter()
        .map(|map| {
            map.into_iter()
                .filter_map(|(k, event_id)| {
                    state::get_field(k)
                        .map(
                            |DbRoomStateField {
                                 event_ty,
                                 state_key,
                                 ..
                             }| {
                                ((event_ty.to_string().into(), state_key), event_id)
                            },
                        )
                        .ok()
                })
                .collect::<StateMap<_>>()
        })
        .collect();
    debug!("resolving state");
    let version_rules = crate::room::get_version_rules(room_version_id)?;
    let state = match crate::core::state::resolve(
        &version_rules.authorization,
        // Rooms without v2 resolution rules fall back to the baseline v2.0
        // rule set.
        version_rules
            .state_resolution
            .v2_rules()
            .unwrap_or(StateResolutionV2Rules::V2_0),
        &fork_states,
        auth_chain_sets
            .iter()
            .map(|set| set.iter().map(|id| id.to_owned()).collect::<HashSet<_>>())
            .collect::<Vec<_>>(),
        &async |id| timeline::get_pdu(&id).map_err(|_| StateError::other("missing pdu 4")),
        // Computes the state subgraph: all auth/prev ancestors of the
        // conflicted events that are themselves state events.
        |map| {
            let mut subgraph = HashSet::new();
            for event_ids in map.values() {
                for event_id in event_ids {
                    if let Ok(pdu) = timeline::get_pdu(event_id) {
                        subgraph.extend(pdu.auth_events.iter().cloned());
                        subgraph.extend(pdu.prev_events.iter().cloned());
                    }
                }
            }
            // NOTE(review): `connect().unwrap()` / `.unwrap()` here panic on a
            // DB failure inside the resolver callback — consider propagating
            // or returning None instead.
            let subgraph = events::table
                .filter(events::id.eq_any(subgraph))
                .filter(events::state_key.is_not_null())
                .select(events::id)
                .load::<OwnedEventId>(&mut connect().unwrap())
                .unwrap()
                .into_iter()
                .collect::<HashSet<_>>();
            Some(subgraph)
        },
    )
    .await
    {
        Ok(new_state) => new_state,
        Err(e) => {
            error!("state resolution failed: {}", e);
            return Err(AppError::internal(
                "state resolution failed, either an event could not be found or deserialization",
            ));
        }
    };
    debug!("state resolution done, compressing state");
    // Compress the resolved state into (field_id, event_sn) pairs, collecting
    // any guards created for freshly allocated sequence numbers.
    let mut new_room_state = BTreeSet::new();
    let mut guards = Vec::new();
    for ((event_type, state_key), event_id) in state {
        let state_key_id = state::ensure_field_id(&event_type.to_string().into(), &state_key)?;
        let (event_sn, guard) = crate::event::ensure_event_sn(room_id, &event_id)?;
        if let Some(guard) = guard {
            guards.push(guard);
        }
        new_room_state.insert(state::compress_event(room_id, state_key_id, event_sn)?);
    }
    Ok((Arc::new(new_room_state), guards))
}
// pub(super) async fn state_at_incoming_degree_one(
// incoming_pdu: &PduEvent,
// ) -> AppResult<IndexMap<i64, OwnedEventId>> {
// let room_id = &incoming_pdu.room_id;
// let prev_event = &*incoming_pdu.prev_events[0];
// let Ok(prev_frame_id) =
// state::get_pdu_frame_id(prev_event).or_else(|_| room::get_frame_id(room_id, None))
// else {
// return Ok(IndexMap::new());
// };
// let Ok(mut state) = state::get_full_state_ids(prev_frame_id) else {
// return Ok(IndexMap::new());
// };
// debug!("using cached state");
// let prev_pdu = timeline::get_pdu(prev_event)?;
// if let Some(state_key) = &prev_pdu.state_key {
// let state_key_id =
// state::ensure_field_id(&prev_pdu.event_ty.to_string().into(), state_key)?;
// state.insert(state_key_id, prev_event.to_owned());
// // Now it's the state after the pdu
// }
// Ok(state)
// }
/// Resolves the room state *before* `incoming_pdu` by running state
/// resolution across the stored states of its `prev_events`.
///
/// Returns `Ok(None)` when any prev event (or its stored state frame) is
/// unknown locally, signalling the caller to fetch state from a remote
/// server instead. Rejected prev events are skipped. On success, the map
/// is keyed by state-field id (see `state::ensure_field_id`) to event id.
pub(super) async fn resolve_state_at_incoming(
    incoming_pdu: &PduEvent,
    version_rules: &RoomVersionRules,
) -> AppResult<Option<IndexMap<i64, OwnedEventId>>> {
    debug!("calculating state at event using state resolve");
    // Collect the state frame of each non-rejected prev event; bail out with
    // `None` as soon as any prev event or its frame is missing locally.
    let mut extremity_state_hashes = HashMap::new();
    for prev_event_id in &incoming_pdu.prev_events {
        let Ok(prev_event) = timeline::get_pdu(prev_event_id) else {
            return Ok(None);
        };
        if prev_event.rejected() {
            continue;
        }
        if let Ok(frame_id) = state::get_pdu_frame_id(prev_event_id) {
            extremity_state_hashes.insert(frame_id, prev_event);
        } else {
            return Ok(None);
        }
    }
    let mut fork_states = Vec::with_capacity(extremity_state_hashes.len());
    let mut auth_chain_sets = Vec::with_capacity(extremity_state_hashes.len());
    for (frame_id, prev_event) in extremity_state_hashes {
        let mut leaf_state = state::get_full_state_ids(frame_id)?;
        // If the prev event itself is a state event, overlay it so the fork
        // represents the state *after* that prev event.
        if let Some(state_key) = &prev_event.state_key {
            let state_key_id =
                state::ensure_field_id(&prev_event.event_ty.to_string().into(), state_key)?;
            leaf_state.insert(state_key_id, prev_event.event_id.clone());
            // Now it's the state after the pdu
        }
        // Convert the field-id-keyed frame into the (event type, state key)
        // keyed `StateMap` the resolver expects.
        let mut state = StateMap::with_capacity(leaf_state.len());
        let mut starting_events = Vec::with_capacity(leaf_state.len());
        for (k, id) in leaf_state {
            if let Ok(DbRoomStateField {
                event_ty,
                state_key,
                ..
            }) = state::get_field(k)
            {
                // FIXME: Undo .to_string().into() when StateMap is updated to use StateEventType
                state.insert((event_ty.to_string().into(), state_key), id.clone());
            } else {
                warn!("failed to get_state_key_id");
            }
            starting_events.push(id);
        }
        // Seed an auth-chain set from every state event of this fork.
        for starting_event in starting_events {
            auth_chain_sets.push(crate::room::auth_chain::get_auth_chain_ids(
                &incoming_pdu.room_id,
                [&*starting_event].into_iter(),
            )?);
        }
        fork_states.push(state);
    }
    // Hold the room state lock while resolving so concurrent writers cannot
    // change the state underneath us.
    let state_lock = room::lock_state(&incoming_pdu.room_id).await;
    let result = resolve(
        &version_rules.authorization,
        version_rules
            .state_resolution
            .v2_rules()
            .unwrap_or(StateResolutionV2Rules::V2_0),
        &fork_states,
        auth_chain_sets
            .iter()
            .map(|set| set.iter().map(|id| id.to_owned()).collect::<HashSet<_>>())
            .collect::<Vec<_>>(),
        // Event fetcher for the resolver; missing events abort resolution.
        &async |event_id| {
            timeline::get_pdu(&event_id)
                .map(|s| s.pdu)
                .map_err(|_| StateError::other("missing pdu 5"))
        },
        // Subgraph callback: union of auth/prev events of the conflicted set.
        // NOTE(review): unlike the resolver in `resolve_state`, this closure
        // does not restrict the subgraph to state events — confirm the
        // difference is intentional.
        |map| {
            let mut subgraph = HashSet::new();
            for event_ids in map.values() {
                for event_id in event_ids {
                    if let Ok(pdu) = timeline::get_pdu(event_id) {
                        subgraph.extend(pdu.auth_events.iter().cloned());
                        subgraph.extend(pdu.prev_events.iter().cloned());
                    }
                }
            }
            Some(subgraph)
        },
    )
    .await;
    drop(state_lock);
    match result {
        // Map the resolved (type, state key) pairs back to field ids.
        Ok(new_state) => Ok(Some(
            new_state
                .into_iter()
                .map(|((event_type, state_key), event_id)| {
                    let state_key_id =
                        state::ensure_field_id(&event_type.to_string().into(), &state_key)?;
                    Ok((state_key_id, event_id))
                })
                // .chain(outlier_state.into_iter().map(|(k, v)| Ok((k, v))))
                .collect::<AppResult<_>>()?,
        )),
        Err(e) => {
            // Not fatal: report `None` so the caller can fall back to
            // fetching state from a remote server.
            warn!("state resolution on prev events failed: {}", e);
            Ok(None)
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/handler.rs | crates/server/src/event/handler.rs | use std::borrow::Borrow;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::iter::once;
use std::sync::Arc;
use std::time::Instant;
use diesel::prelude::*;
use indexmap::IndexMap;
use palpo_core::Direction;
use super::fetching::fetch_and_process_missing_state;
use super::resolver::{resolve_state, resolve_state_at_incoming};
use crate::core::events::room::server_acl::RoomServerAclEventContent;
use crate::core::events::{StateEventType, TimelineEventType};
use crate::core::federation::event::timestamp_to_event_request;
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::core::room_version_rules::RoomVersionRules;
use crate::core::serde::{CanonicalJsonObject, CanonicalJsonValue, JsonValue, canonical_json};
use crate::core::signatures::Verified;
use crate::core::state::{Event, StateError, event_auth};
use crate::core::{Seqnum, UnixMillis};
use crate::data::room::DbEvent;
use crate::data::{connect, diesel_exists, schema::*};
use crate::event::{OutlierPdu, PduEvent, SnPduEvent, handler};
use crate::room::state::{CompressedState, DeltaInfo, update_backward_extremities};
use crate::room::{state, timeline};
use crate::sending::send_federation_request;
use crate::{AppError, AppResult, MatrixError, exts::*, room};
#[tracing::instrument(skip_all)]
pub(crate) async fn process_incoming_pdu(
remote_server: &ServerName,
event_id: &EventId,
room_id: &RoomId,
room_version_id: &RoomVersionId,
value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool,
is_backfill: bool,
) -> AppResult<()> {
if !crate::room::room_exists(room_id)? {
return Err(MatrixError::not_found("room is unknown to this server").into());
}
let event = events::table
.filter(events::id.eq(event_id))
.first::<DbEvent>(&mut connect()?);
if let Ok(event) = event {
if !event.is_outlier {
return Ok(());
}
if event.is_rejected || event.soft_failed {
diesel::delete(&event).execute(&mut connect()?).ok();
diesel::delete(event_points::table.filter(event_points::event_id.eq(event_id)))
.execute(&mut connect()?)
.ok();
diesel::delete(event_datas::table.filter(event_datas::event_id.eq(event_id)))
.execute(&mut connect()?)
.ok();
}
}
// 1.2 Check if the room is disabled
if crate::room::is_disabled(room_id)? {
return Err(MatrixError::forbidden(
"federation of this room is currently disabled on this server",
None,
)
.into());
}
// 1.3.1 Check room ACL on origin field/server
handler::acl_check(remote_server, room_id)?;
// 1.3.2 Check room ACL on sender's server name
let sender: OwnedUserId = serde_json::from_value(
value
.get("sender")
.ok_or_else(|| MatrixError::invalid_param("pdu does not have a valid sender key: {e}"))?
.clone()
.into(),
)
.map_err(|_| MatrixError::bad_json("user id in sender is invalid."))?;
if sender.server_name().ne(remote_server) {
handler::acl_check(sender.server_name(), room_id)?;
}
// 1. Skip the PDU if we already have it as a timeline event
if state::get_pdu_frame_id(event_id).is_ok() {
return Ok(());
}
let Some(outlier_pdu) =
process_to_outlier_pdu(remote_server, event_id, room_id, room_version_id, value).await?
else {
return Ok(());
};
let (incoming_pdu, val, event_guard) = outlier_pdu
.process_incoming(remote_server, is_backfill)
.await?;
if incoming_pdu.rejected() {
return Ok(());
}
check_room_id(room_id, &incoming_pdu)?;
// 8. if not timeline event: stop
if !is_timeline_event {
return Ok(());
}
// Skip old events
// let first_pdu_in_room = timeline::first_pdu_in_room(room_id)?
// .ok_or_else(|| AppError::internal("failed to find first pdu in database"))?;
// if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts {
// return Ok(());
// }
// Done with prev events, now handling the incoming event
let start_time = Instant::now();
crate::ROOM_ID_FEDERATION_HANDLE_TIME
.write()
.unwrap()
.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
if let Err(e) = process_to_timeline_pdu(incoming_pdu, val, Some(remote_server)).await {
error!("failed to process incoming pdu to timeline {}", e);
} else {
debug!("succeed to process incoming pdu to timeline {}", event_id);
let pdu = timeline::get_pdu(event_id)?;
update_backward_extremities(&pdu)?;
}
drop(event_guard);
crate::ROOM_ID_FEDERATION_HANDLE_TIME
.write()
.unwrap()
.remove(&room_id.to_owned());
Ok(())
}
#[tracing::instrument(skip_all)]
pub(crate) async fn process_pulled_pdu(
remote_server: &ServerName,
event_id: &EventId,
room_id: &RoomId,
room_version_id: &RoomVersionId,
value: BTreeMap<String, CanonicalJsonValue>,
is_backfill: bool,
) -> AppResult<()> {
// 1.3.1 Check room ACL on origin field/server
handler::acl_check(remote_server, room_id)?;
// 1.3.2 Check room ACL on sender's server name
let sender: OwnedUserId = serde_json::from_value(
value
.get("sender")
.ok_or_else(|| MatrixError::invalid_param("pdu does not have a valid sender key: {e}"))?
.clone()
.into(),
)
.map_err(|_| MatrixError::bad_json("user id in sender is invalid"))?;
if sender.server_name().ne(remote_server) {
handler::acl_check(sender.server_name(), room_id)?;
}
// 1. Skip the PDU if we already have it as a timeline event
if state::get_pdu_frame_id(event_id).is_ok() {
return Ok(());
}
let Some(outlier_pdu) =
process_to_outlier_pdu(remote_server, event_id, room_id, room_version_id, value).await?
else {
return Ok(());
};
let (pdu, json_data, _) = outlier_pdu
.process_pulled(remote_server, is_backfill)
.await?;
if pdu.soft_failed || pdu.rejected() {
return Ok(());
}
if let Err(e) = process_to_timeline_pdu(pdu, json_data, Some(remote_server)).await {
error!("failed to process pulled pdu to timeline: {}", e);
} else {
debug!("succeed to process incoming pdu to timeline {}", event_id);
let pdu = timeline::get_pdu(event_id)?;
update_backward_extremities(&pdu)?;
let mut next_ids = event_missings::table
.filter(event_missings::room_id.eq(&pdu.room_id))
.filter(event_missings::missing_id.eq(&pdu.event_id))
.select(event_missings::event_id)
.load::<OwnedEventId>(&mut connect()?)?;
while !next_ids.is_empty() {
let mut new_timlined_event_ids = Vec::new();
for next_id in next_ids {
diesel::delete(
event_missings::table
.filter(event_missings::room_id.eq(&pdu.room_id))
.filter(event_missings::event_id.eq(&next_id))
.filter(event_missings::missing_id.eq(&pdu.event_id)),
)
.execute(&mut connect()?)?;
let query = event_missings::table.filter(event_missings::event_id.eq(&next_id));
if !diesel_exists!(query, &mut connect()?)? {
diesel::delete(
timeline_gaps::table
.filter(timeline_gaps::room_id.eq(&pdu.room_id))
.filter(timeline_gaps::event_id.eq(&next_id)),
)
.execute(&mut connect()?)?;
// let query = event_phases::table
// .filter(event_phases::event_id.eq(&event_id))
// .filter(event_phases::goal.eq("timeline"));
// if diesel_exists!(query, &mut connect()?)? {
if let Ok(pdu) = timeline::get_pdu(&next_id)
&& pdu.is_outlier
&& !pdu.rejected()
{
let content = pdu.get_content()?;
if let Err(e) =
process_to_timeline_pdu(pdu, content, Some(remote_server)).await
{
error!("failed to process incoming pdu to timeline {}", e);
} else {
debug!("succeed to process incoming pdu to timeline {}", next_id);
new_timlined_event_ids.push(next_id);
}
} else {
warn!("cannot find outlier pdu: {}", next_id);
}
// }
}
}
next_ids = new_timlined_event_ids;
}
}
Ok(())
}
/// Validates a remote PDU up to the "outlier" stage: signature/hash checks,
/// redaction on hash mismatch, and an auth check against the event's own
/// auth events — without touching the room's current state or timeline.
///
/// Returns `Ok(None)` when the event is irrelevant to us (we are not in the
/// room and it is not a membership event), otherwise an `OutlierPdu`
/// carrying the verified event, its canonical JSON, and the soft-fail /
/// rejection markers computed here.
#[tracing::instrument(skip_all)]
pub async fn process_to_outlier_pdu(
    remote_server: &ServerName,
    event_id: &EventId,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    mut value: CanonicalJsonObject,
) -> AppResult<Option<OutlierPdu>> {
    // Fast path: we already hold a fully-accepted (non-outlier, non-failed)
    // copy of this event — reuse the stored JSON instead of re-verifying.
    if let Some((room_id, event_sn, event_data)) = event_datas::table
        .filter(event_datas::event_id.eq(event_id))
        .select((
            event_datas::room_id,
            event_datas::event_sn,
            event_datas::json_data,
        ))
        .first::<(OwnedRoomId, Seqnum, JsonValue)>(&mut connect()?)
        .optional()?
        && let Ok(val) = serde_json::from_value::<CanonicalJsonObject>(event_data.clone())
        && let Ok(pdu) = timeline::get_pdu(event_id)
        && !pdu.soft_failed
        && !pdu.is_outlier
        && !pdu.rejected()
    {
        return Ok(Some(OutlierPdu {
            pdu: pdu.into_inner(),
            json_data: val,
            soft_failed: false,
            remote_server: remote_server.to_owned(),
            room_id: room_id.to_owned(),
            room_version: room_version.to_owned(),
            event_sn: Some(event_sn),
            rejected_auth_events: vec![],
            rejected_prev_events: vec![],
        }));
    }
    // 1.1. Remove unsigned field
    value.remove("unsigned");
    let version_rules = crate::room::get_version_rules(room_version)?;
    // Validate origin_server_ts; the parsed value itself is currently unused
    // (bound as `_origin_server_ts`), only its well-formedness matters.
    let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
        error!("invalid pdu, no origin_server_ts field");
        MatrixError::missing_param("invalid pdu, no origin_server_ts field")
    })?;
    let _origin_server_ts = {
        let ts = origin_server_ts
            .as_integer()
            .ok_or_else(|| MatrixError::invalid_param("origin_server_ts must be an integer"))?;
        UnixMillis(
            ts.try_into()
                .map_err(|_| MatrixError::invalid_param("time must be after the unix epoch"))?,
        )
    };
    // Verify signatures and content hash. `Verified::Signatures` means the
    // signatures check out but the content hash does not: accept a redacted
    // copy of the event instead.
    let mut val = match crate::server_key::verify_event(&value, room_version).await {
        Ok(Verified::Signatures) => {
            // Redact
            warn!("calculated hash does not match: {}", event_id);
            let obj = match canonical_json::redact(value, &version_rules.redaction, None) {
                Ok(obj) => obj,
                Err(_) => return Err(MatrixError::invalid_param("redaction failed").into()),
            };
            // Skip the PDU if it is redacted and we already have it as an outlier event
            if timeline::get_pdu_json(event_id)?.is_some() {
                return Err(MatrixError::invalid_param(
                    "event was redacted and we already knew about it",
                )
                .into());
            }
            obj
        }
        Ok(Verified::All) => value,
        Err(e) => {
            warn!("dropping bad event {}: {} {value:#?}", event_id, e,);
            return Err(MatrixError::invalid_param("signature verification failed").into());
        }
    };
    // Now that we have checked the signature and hashes we can add the eventID and convert
    // to our PduEvent type
    val.insert(
        "event_id".to_owned(),
        CanonicalJsonValue::String(event_id.as_str().to_owned()),
    );
    let mut incoming_pdu = PduEvent::from_json_value(
        room_id,
        event_id,
        serde_json::to_value(&val).expect("`CanonicalJson` is a valid `JsonValue`"),
    )
    .map_err(|_| AppError::internal("event is not a valid PDU."))?;
    check_room_id(room_id, &incoming_pdu)?;
    // If we are not in the room, only membership events are worth keeping
    // (as outliers); everything else is dropped with `Ok(None)`.
    let server_joined = crate::room::is_server_joined(crate::config::server_name(), room_id)?;
    if !server_joined {
        if let Some(state_key) = incoming_pdu.state_key.as_deref()
            && incoming_pdu.event_ty == TimelineEventType::RoomMember
            // && state_key.ends_with(&*format!(":{}", crate::config::server_name()))
        {
            debug!("added pdu as outlier");
            return Ok(Some(OutlierPdu {
                pdu: incoming_pdu,
                json_data: val,
                soft_failed: false,
                remote_server: remote_server.to_owned(),
                room_id: room_id.to_owned(),
                room_version: room_version.to_owned(),
                event_sn: None,
                rejected_auth_events: vec![],
                rejected_prev_events: vec![],
            }));
        }
        return Ok(None);
    }
    // Missing prev/auth events mark the PDU soft-failed; rejected prev/auth
    // events mark it rejected outright (with the reason recorded).
    let mut soft_failed = false;
    let (prev_events, missing_prev_event_ids) =
        timeline::get_may_missing_pdus(room_id, &incoming_pdu.prev_events)?;
    if !missing_prev_event_ids.is_empty() {
        warn!(
            "process event to outlier missing prev events {}: {:?}",
            incoming_pdu.event_id, missing_prev_event_ids
        );
        soft_failed = true;
    }
    let rejected_prev_events = prev_events
        .iter()
        .filter_map(|pdu| {
            if pdu.rejected() {
                Some(pdu.event_id.clone())
            } else {
                None
            }
        })
        .collect::<Vec<_>>();
    if !rejected_prev_events.is_empty() {
        incoming_pdu.rejection_reason = Some(format!(
            "event's prev events rejected: {rejected_prev_events:?}"
        ));
        // soft_failed = true; // Will try to fetch rejected prev events again later
    }
    let (auth_events, missing_auth_event_ids) =
        timeline::get_may_missing_pdus(room_id, &incoming_pdu.auth_events)?;
    if !missing_auth_event_ids.is_empty() {
        warn!(
            "process event to outlier missing auth events {}: {:?}",
            incoming_pdu.event_id, missing_auth_event_ids
        );
        soft_failed = true;
    }
    let rejected_auth_events = auth_events
        .iter()
        .filter_map(|pdu| {
            if pdu.rejected() {
                Some(pdu.event_id.clone())
            } else {
                None
            }
        })
        .collect::<Vec<_>>();
    if !rejected_auth_events.is_empty() {
        incoming_pdu.rejection_reason = Some(format!(
            "event's auth events rejected: {rejected_auth_events:?}"
        ))
    }
    // Index the auth events by (event type, state key) for the checks below.
    let auth_events = auth_events
        .into_iter()
        .map(|auth_event| {
            (
                (
                    auth_event.event_ty.to_string().into(),
                    auth_event.state_key.clone().unwrap_or_default(),
                ),
                auth_event,
            )
        })
        .collect::<HashMap<(StateEventType, _), _>>();
    // The original create event must be in the auth events
    // NOTE(review): `matches!(_, Some(_) | None)` is always true, so the
    // negated condition can never fire — this "wrong create event" check is
    // currently a no-op and needs a real comparison. TODO confirm intent.
    if !matches!(
        auth_events.get(&(StateEventType::RoomCreate, "".to_owned())),
        Some(_) | None
    ) {
        incoming_pdu.rejection_reason =
            Some("incoming event refers to wrong create event".to_owned());
    }
    if incoming_pdu.rejection_reason.is_none() {
        if let Err(e) = auth_check(&incoming_pdu, &version_rules, None).await {
            match e {
                AppError::State(StateError::Forbidden(brief)) => {
                    // Definitive auth failure: record the rejection reason.
                    incoming_pdu.rejection_reason = Some(brief);
                }
                _ => {
                    // Transient failure (e.g. state could not be resolved):
                    // soft-fail instead of rejecting outright.
                    soft_failed = true;
                }
            }
        } else {
            // NOTE(review): a passing auth check clears any soft-fail set
            // above for missing prev/auth events — confirm this override is
            // intentional.
            soft_failed = false;
        }
    }
    Ok(Some(OutlierPdu {
        pdu: incoming_pdu,
        soft_failed,
        json_data: val,
        remote_server: remote_server.to_owned(),
        room_id: room_id.to_owned(),
        room_version: room_version.to_owned(),
        event_sn: None,
        rejected_auth_events,
        rejected_prev_events,
    }))
}
/// Promotes a validated outlier PDU into the room timeline: resolves the
/// room state at the event (fetching it from `remote_server` if local
/// resolution fails), re-runs the auth check against that state, performs
/// the soft-fail check, updates the room's resolved state for state events,
/// and finally appends the PDU.
///
/// Errors if the PDU is rejected, has already left the outlier stage is a
/// no-op, and a soft-failed event is recorded but returned as an error.
#[tracing::instrument(skip(incoming_pdu, json_data))]
pub async fn process_to_timeline_pdu(
    incoming_pdu: SnPduEvent,
    json_data: CanonicalJsonObject,
    remote_server: Option<&ServerName>,
) -> AppResult<()> {
    // Skip the PDU if we already have it as a timeline event
    if !incoming_pdu.is_outlier {
        return Ok(());
    }
    if incoming_pdu.rejected() {
        return Err(AppError::internal(
            "cannot process rejected event to timeline",
        ));
    }
    debug!("process to timeline event {}", incoming_pdu.event_id);
    let room_version_id = &room::get_version(&incoming_pdu.room_id)?;
    let version_rules = crate::room::get_version_rules(room_version_id)?;
    // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
    // doing all the checks in this list starting at 1. These are not timeline events.
    debug!("resolving state at event");
    // Special case: we are not joined to the room. Only membership events
    // targeting a user on our server are processed, via a simplified path
    // that forces the membership event as the room state.
    let server_joined =
        crate::room::is_server_joined(crate::config::server_name(), &incoming_pdu.room_id)?;
    if !server_joined {
        if let Some(state_key) = incoming_pdu.state_key.as_deref()
            && incoming_pdu.event_ty == TimelineEventType::RoomMember
            && state_key != incoming_pdu.sender().as_str() //????
            && state_key.ends_with(&*format!(":{}", crate::config::server_name()))
        {
            // Best-effort state at the event; an empty map is tolerated here.
            // let state_at_incoming_event = state_at_incoming_degree_one(&incoming_pdu).await?;
            let state_at_incoming_event = resolve_state_at_incoming(&incoming_pdu, &version_rules)
                .await
                .ok()
                .flatten()
                .unwrap_or_default();
            // 13. Use state resolution to find new room state
            let state_lock = crate::room::lock_state(&incoming_pdu.room_id).await;
            // Now that the event has passed all auth it is added into the timeline.
            // We use the `state_at_event` instead of `state_after` so we accurately
            // represent the state for this event.
            debug!("compressing state at event");
            let compressed_state_ids = Arc::new(
                state_at_incoming_event
                    .iter()
                    .map(|(field_id, event_id)| {
                        state::compress_event(
                            &incoming_pdu.room_id,
                            *field_id,
                            crate::event::ensure_event_sn(&incoming_pdu.room_id, event_id)?.0,
                        )
                    })
                    .collect::<AppResult<_>>()?,
            );
            debug!("preparing for stateres to derive new room state");
            // We also add state after incoming event to the fork states
            // let mut state_after = state_at_incoming_event.clone();
            let state_key_id =
                state::ensure_field_id(&incoming_pdu.event_ty.to_string().into(), state_key)?;
            let compressed_event =
                state::compress_event(&incoming_pdu.room_id, state_key_id, incoming_pdu.event_sn)?;
            // The forced room state consists solely of this membership event.
            let mut new_room_state = CompressedState::new();
            new_room_state.insert(compressed_event);
            // Set the new room state to the resolved state
            debug!("forcing new room state");
            let DeltaInfo {
                frame_id,
                appended,
                disposed,
            } = state::save_state(&incoming_pdu.room_id, Arc::new(new_room_state))?;
            state::force_state(&incoming_pdu.room_id, frame_id, appended, disposed)?;
            debug!("appended incoming pdu");
            timeline::append_pdu(&incoming_pdu, json_data, &state_lock).await?;
            state::set_event_state(
                &incoming_pdu.event_id,
                incoming_pdu.event_sn,
                &incoming_pdu.room_id,
                compressed_state_ids,
            )?;
            drop(state_lock);
        }
        return Ok(());
    }
    // Normal path: resolve state at the event locally, falling back to the
    // remote server's /state_ids when local resolution is impossible.
    let state_at_incoming_event = resolve_state_at_incoming(&incoming_pdu, &version_rules).await?;
    let state_at_incoming_event = if let Some(state_at_incoming_event) = state_at_incoming_event {
        state_at_incoming_event
    } else if let Some(remote_server) = remote_server {
        fetch_and_process_missing_state(
            remote_server,
            &incoming_pdu.room_id,
            room_version_id,
            &incoming_pdu.event_id,
        )
        .await?
        .state_events
    } else {
        return Err(AppError::internal(
            "cannot process to timeline without state at event",
        ));
    };
    auth_check(
        &incoming_pdu,
        &version_rules,
        Some(&state_at_incoming_event),
    )
    .await?;
    // Soft fail check before doing state res
    debug!("performing soft-fail check");
    // A redaction the sender is not allowed to perform is soft-failed.
    let soft_fail = match incoming_pdu.redacts_id(room_version_id) {
        None => false,
        Some(redact_id) => {
            !state::user_can_redact(
                &redact_id,
                &incoming_pdu.sender,
                &incoming_pdu.room_id,
                true,
            )
            .await?
        }
    };
    // 13. Use state resolution to find new room state
    let state_lock = crate::room::lock_state(&incoming_pdu.room_id).await;
    // Only keep those extremities were not referenced yet
    // extremities.retain(|id| !matches!(crate::room::pdu_metadata::is_event_referenced(room_id, id), Ok(true)));
    debug!("compressing state at event");
    let compressed_state_ids = Arc::new(
        state_at_incoming_event
            .iter()
            .map(|(field_id, event_id)| {
                state::compress_event(
                    &incoming_pdu.room_id,
                    *field_id,
                    crate::event::ensure_event_sn(&incoming_pdu.room_id, event_id)?.0,
                )
            })
            .collect::<AppResult<_>>()?,
    );
    // For state events, resolve the room's new current state including this
    // event; `guards` keeps freshly-assigned event sequence numbers alive
    // until the append below completes.
    let guards = if let Some(state_key) = &incoming_pdu.state_key {
        debug!("preparing for stateres to derive new room state");
        // We also add state after incoming event to the fork states
        let mut state_after = state_at_incoming_event.clone();
        let state_key_id =
            state::ensure_field_id(&incoming_pdu.event_ty.to_string().into(), state_key)?;
        state_after.insert(state_key_id, incoming_pdu.event_id.clone());
        let (new_room_state, guards) =
            resolve_state(&incoming_pdu.room_id, room_version_id, state_after).await?;
        // Set the new room state to the resolved state
        debug!("forcing new room state");
        let DeltaInfo {
            frame_id,
            appended,
            disposed,
        } = state::save_state(&incoming_pdu.room_id, new_room_state)?;
        state::force_state(&incoming_pdu.room_id, frame_id, appended, disposed)?;
        guards
    } else {
        vec![]
    };
    // Now that the event has passed all auth it is added into the timeline.
    // We use the `state_at_event` instead of `state_after` so we accurately
    // represent the state for this event.
    let event_id = incoming_pdu.event_id.clone();
    // 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it
    if soft_fail {
        debug!("starting soft fail auth check");
        // We start looking at current room state now, so lets lock the room
        // Now we calculate the set of extremities this room has after the incoming event has been
        // applied. We start with the previous extremities (aka leaves)
        debug!("calculating extremities");
        let mut extremities: BTreeSet<_> = state::get_forward_extremities(&incoming_pdu.room_id)?
            .into_iter()
            .collect();
        // Remove any forward extremities that are referenced by this incoming event's prev_events
        extremities.retain(|event_id| !incoming_pdu.prev_events.contains(event_id));
        let extremities = extremities
            .iter()
            .map(Borrow::borrow)
            .chain(once(event_id.borrow()));
        state::set_forward_extremities(&incoming_pdu.room_id, extremities, &state_lock)?;
        state::update_backward_extremities(&incoming_pdu)?;
        // Soft fail, we keep the event as an outlier but don't add it to the timeline
        warn!("event was soft failed: {:?}", incoming_pdu);
        crate::room::pdu_metadata::mark_event_soft_failed(&incoming_pdu.event_id)?;
        return Err(MatrixError::invalid_param("event has been soft failed").into());
    } else {
        debug!("appended incoming pdu");
        timeline::append_pdu(&incoming_pdu, json_data, &state_lock).await?;
        state::set_event_state(
            &incoming_pdu.event_id,
            incoming_pdu.event_sn,
            &incoming_pdu.room_id,
            compressed_state_ids,
        )?;
    }
    drop(guards);
    // Event has passed all auth/stateres checks
    drop(state_lock);
    Ok(())
}
/// Asks each server in `remote_servers` (in order) for the event closest to
/// timestamp `ts` in direction `dir` via the federation
/// `timestamp_to_event` endpoint.
///
/// When `exist` carries a locally-known candidate `(event_id, ts)`, a remote
/// answer is only accepted if it is strictly closer to the requested
/// timestamp in the given direction; with no local candidate the first
/// successful remote answer wins. Returns the answering server together
/// with its response body, or an internal error if no server produced a
/// usable answer.
pub async fn remote_timestamp_to_event(
    remote_servers: &[OwnedServerName],
    room_id: &RoomId,
    dir: Direction,
    ts: UnixMillis,
    exist: Option<&(OwnedEventId, UnixMillis)>,
) -> AppResult<(OwnedServerName, TimestampToEventResBody)> {
    // Helper: run the timestamp_to_event federation request on one server.
    async fn remote_event(
        remote_server: &ServerName,
        room_id: &RoomId,
        dir: Direction,
        ts: UnixMillis,
    ) -> AppResult<TimestampToEventResBody> {
        let request = timestamp_to_event_request(
            &remote_server.origin().await,
            TimestampToEventReqArgs {
                room_id: room_id.to_owned(),
                dir,
                ts,
            },
        )?
        .into_inner();
        let res_body = send_federation_request(remote_server, request, None)
            .await?
            .json::<TimestampToEventResBody>()
            .await?;
        Ok(res_body)
    }
    for remote_server in remote_servers {
        if let Ok(res_body) = remote_event(remote_server, room_id, dir, ts).await {
            if let Some((_exist_id, exist_ts)) = exist {
                // Accept only if the remote event beats the local candidate
                // in the requested direction.
                match dir {
                    Direction::Forward => {
                        if res_body.origin_server_ts < *exist_ts {
                            return Ok((remote_server.to_owned(), res_body));
                        }
                    }
                    Direction::Backward => {
                        if res_body.origin_server_ts > *exist_ts {
                            return Ok((remote_server.to_owned(), res_body));
                        }
                    }
                }
            } else {
                // No local candidate: first successful remote answer wins.
                // BUGFIX: this `else` was previously attached to the outer
                // `if let Ok(res_body)`, where `res_body` is not in scope —
                // it belongs to the `exist` check.
                return Ok((remote_server.to_owned(), res_body));
            }
        }
    }
    Err(AppError::internal(
        "failed to get timestamp to event from remote servers",
    ))
}
/// Runs the spec's event-auth algorithm for `incoming_pdu` twice:
/// once against the room state at the event (check 11) and once against the
/// event's own declared auth events, resolving state at the event first
/// when the caller did not supply `state_at_incoming_event`.
///
/// Errors when state at the event cannot be determined or either auth pass
/// fails.
pub async fn auth_check(
    incoming_pdu: &PduEvent,
    version_rules: &RoomVersionRules,
    state_at_incoming_event: Option<&IndexMap<i64, OwnedEventId>>,
) -> AppResult<()> {
    let auth_rules = &version_rules.authorization;
    let state_at_incoming_event = if let Some(state_at_incoming_event) = state_at_incoming_event {
        state_at_incoming_event.to_owned()
    } else if let Some(state_at_incoming_event) =
        resolve_state_at_incoming(incoming_pdu, version_rules).await?
    {
        state_at_incoming_event
    } else {
        return Err(AppError::internal(
            "cannot auth check event without state at event",
        ));
    };
    // Pass 1: auth against the room state at the event. An empty state map
    // (e.g. the first events of a room) skips this pass.
    if !state_at_incoming_event.is_empty() {
        debug!("performing auth check");
        // 11. Check the auth of the event passes based on the state of the event
        event_auth::auth_check(
            auth_rules,
            incoming_pdu,
            // Fetch arbitrary events by id for the auth algorithm.
            &async |event_id| {
                timeline::get_pdu( &event_id).map(|e|e.into_inner())
                    .map_err(|_| StateError::other("missing pdu in auth check event fetch"))
            },
            // Look up a state event (type, state key) in the resolved state.
            &async |k, s| {
                let Ok(state_key_id) = state::get_field_id(&k.to_string().into(), &s) else {
                    warn!("missing field id for state type: {k}, state_key: {s}");
                    return Err(StateError::other(format!(
                        "missing field id for state type: {k}, state_key: {s}"
                    )));
                };
                match state_at_incoming_event.get(&state_key_id) {
                    Some(event_id) => match timeline::get_pdu(event_id) {
                        Ok(pdu) => Ok(pdu.into_inner()),
                        Err(e) => {
                            warn!("failed to get pdu for state resolution: {}", e);
                            Err(StateError::other(format!(
                                "failed to get pdu for state resolution: {}",
                                e
                            )))
                        }
                    },
                    None => {
                        warn!(
                            "missing state key id {state_key_id} for state type: {k}, state_key: {s}, room: {}", incoming_pdu.room_id
                        );
                        Err(StateError::other(format!(
                            "missing state key id {state_key_id} for state type: {k}, state_key: {s}, room: {}", incoming_pdu.room_id
                        )))
                    }
                }
            },
        )
        .await?;
        debug!("auth check succeeded");
    }
    // Pass 2: auth against the event's own declared auth events.
    debug!("gathering auth events");
    let auth_events = state::get_auth_events(
        &incoming_pdu.room_id,
        &incoming_pdu.event_ty,
        &incoming_pdu.sender,
        incoming_pdu.state_key.as_deref(),
        &incoming_pdu.content,
        auth_rules,
    )?;
    event_auth::auth_check(
        auth_rules,
        incoming_pdu,
        &async |event_id| {
            timeline::get_pdu(&event_id).map(|e|e.into_inner()).map_err(|_| StateError::other("missing pdu 3"))
        },
        &async |k, s| {
            if let Some(pdu) = auth_events.get(&(k.clone(), s.to_string())).cloned() {
                return Ok(pdu.into_inner());
            }
            // Fallback for m.room.create under room versions flagged with
            // `room_create_event_id_as_room_id`: fetch the create event
            // directly and verify it belongs to this room.
            if auth_rules.room_create_event_id_as_room_id && k == StateEventType::RoomCreate {
                let pdu = crate::room::get_create(&incoming_pdu.room_id)
                    .map_err(|_| StateError::other("missing create event"))?;
                if pdu.room_id != incoming_pdu.room_id {
                    Err(StateError::other("mismatched room id in create event"))
                } else {
                    Ok(pdu.into_inner().into_inner())
                }
            } else {
                Err(StateError::other(format!(
                    "failed auth check when process to timeline, missing state event, event_type: {k}, state_key:{s}"
                )))
            }
        },
    )
    .await?;
    Ok(())
}
/// Returns Ok if the acl allows the server.
///
/// Missing, unparsable, or empty-allow-list ACL events are treated as "no
/// restriction" so a broken ACL never locks the room down by accident.
pub fn acl_check(server_name: &ServerName, room_id: &RoomId) -> AppResult<()> {
    // No ACL state event in this room: nothing to enforce.
    let Ok(acl_event) = room::get_state(room_id, &StateEventType::RoomServerAcl, "", None) else {
        return Ok(());
    };
    let acl_event_content = match acl_event.get_content::<RoomServerAclEventContent>() {
        Ok(content) => content,
        Err(_) => {
            warn!("invalid ACL event");
            return Ok(());
        }
    };
    // An empty allow list would deny everyone; treat it as a broken event.
    if acl_event_content.allow.is_empty() {
        // Ignore broken acl events
        return Ok(());
    }
    if !acl_event_content.is_allowed(server_name) {
        info!(
            "server {} was denied by room ACL in {}",
            server_name, room_id
        );
        return Err(MatrixError::forbidden("server was denied by room ACL", None).into());
    }
    Ok(())
}
fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> AppResult<()> {
if pdu.room_id != room_id {
warn!("found event from room {} in room {}", pdu.room_id, room_id);
return Err(MatrixError::invalid_param("Event has wrong room id").into());
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | true |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/fetching.rs | crates/server/src/event/fetching.rs | use std::collections::HashSet;
use diesel::prelude::*;
use indexmap::IndexMap;
use salvo::http::StatusError;
use crate::core::federation::authorization::{EventAuthResBody, event_auth_request};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
RoomStateAtEventReqArgs, RoomStateIdsResBody, RoomStateReqArgs, RoomStateResBody,
event_request, missing_events_request, room_state_ids_request, room_state_request,
};
use crate::core::identifiers::*;
use crate::core::state::Event;
use crate::data::diesel_exists;
use crate::data::schema::*;
use crate::event::handler::{process_pulled_pdu, process_to_outlier_pdu};
use crate::event::{connect, parse_fetched_pdu, seen_event_ids};
use crate::room::state::ensure_field_id;
use crate::room::timeline;
use crate::sending::send_federation_request;
use crate::{AppResult, PduEvent, SnPduEvent, exts::*, room};
/// Result of fetching a remote event's state over federation.
pub struct FetchedState {
    // Room state at the event, keyed by state-field id (presumably the ids
    // produced by `ensure_field_id` — confirm against callers).
    pub state_events: IndexMap<i64, OwnedEventId>,
    // Auth-chain events for the event, same key scheme as `state_events`.
    pub auth_events: IndexMap<i64, OwnedEventId>,
}
/// Fetches events missing between our forward extremities and
/// `incoming_pdu` from `remote_server` via `/get_missing_events` (limit 10)
/// and runs each through the pulled-PDU pipeline, shallowest first.
///
/// Per-event processing failures are logged and skipped so one bad event
/// does not abort the whole batch.
pub async fn fetch_and_process_missing_events(
    remote_server: &ServerName,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    incoming_pdu: &PduEvent,
    is_backfill: bool,
) -> AppResult<()> {
    // Don't request anything older than our first known event in the room.
    let min_depth = timeline::first_pdu_in_room(room_id)
        .ok()
        .and_then(|pdu| pdu.map(|p| p.depth))
        .unwrap_or(0);
    let mut fetched_events = IndexMap::with_capacity(10);
    let earliest_events = room::state::get_forward_extremities(room_id)?;
    let mut known_events = HashSet::new();
    // Determine which prev events we actually lack, or only hold rejected
    // copies of; return early when nothing is missing.
    let mut missing_events = Vec::with_capacity(incoming_pdu.prev_events.len());
    for prev_id in &incoming_pdu.prev_events {
        let pdu = timeline::get_pdu(prev_id);
        if let Ok(pdu) = &pdu {
            if pdu.rejected() {
                missing_events.push(prev_id.to_owned());
            } else {
                known_events.insert(prev_id.to_owned());
            }
        } else if !earliest_events.contains(prev_id) {
            missing_events.push(prev_id.to_owned());
        }
    }
    if missing_events.is_empty() {
        return Ok(());
    }
    let request = missing_events_request(
        &remote_server.origin().await,
        room_id,
        MissingEventsReqBody {
            limit: 10,
            min_depth,
            earliest_events,
            latest_events: vec![incoming_pdu.event_id.clone()],
        },
    )?
    .into_inner();
    let response = send_federation_request(remote_server, request, None).await?;
    let res_body = response.json::<MissingEventsResBody>().await?;
    // Deduplicate the returned events against each other and against
    // everything we already hold locally.
    for event in res_body.events {
        let (event_id, event_val) = parse_fetched_pdu(room_id, room_version, &event)?;
        if known_events.contains(&event_id) {
            continue;
        }
        if fetched_events.contains_key(&event_id) || timeline::get_pdu(&event_id).is_ok() {
            known_events.insert(event_id.clone());
            continue;
        }
        fetched_events.insert(event_id.clone(), event_val);
        known_events.insert(event_id.clone());
    }
    // Process shallower (older) events first so dependencies are satisfied
    // before their descendants are handled.
    fetched_events.sort_by(|_x1, v1, _k2, v2| {
        let depth1 = v1.get("depth").and_then(|v| v.as_integer()).unwrap_or(0);
        let depth2 = v2.get("depth").and_then(|v| v.as_integer()).unwrap_or(0);
        depth1.cmp(&depth2)
    });
    for (event_id, event_val) in fetched_events {
        // Skip events that landed in the database since we fetched.
        let is_exists = diesel_exists!(
            events::table
                .filter(events::id.eq(&event_id))
                .filter(events::room_id.eq(&room_id)),
            &mut connect()?
        )?;
        if is_exists {
            continue;
        }
        if let Err(e) = process_pulled_pdu(
            remote_server,
            &event_id,
            room_id,
            room_version,
            event_val.clone(),
            is_backfill,
        )
        .await
        {
            error!(
                "failed to process fetched missing prev event {}: {}",
                event_id, e
            );
        }
    }
    Ok(())
}
/// Fetch the auth chain of `event_id` from `remote_server` (`/event_auth`)
/// and persist any events we do not have yet as outliers.
///
/// Returns the auth-chain events that are locally available afterwards.
/// NOTE(review): events that exist in the `events` table but are not
/// returned by `timeline::get_pdu` are neither reprocessed nor included in
/// the result — confirm this is intended.
pub async fn fetch_and_process_auth_chain(
    remote_server: &ServerName,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    event_id: &EventId,
) -> AppResult<Vec<SnPduEvent>> {
    let request =
        event_auth_request(&remote_server.origin().await, room_id, event_id)?.into_inner();
    let response = send_federation_request(remote_server, request, None).await?;
    // Surface remote HTTP failures as status errors rather than letting the
    // JSON decode below produce a confusing parse error.
    if !response.status().is_success()
        && let Some(status) = StatusError::from_code(response.status())
    {
        return Err(status.into());
    }
    let res_body = response.json::<EventAuthResBody>().await?;
    let mut auth_events = Vec::new();
    for event in res_body.auth_chain {
        let (event_id, event_value) =
            crate::event::parse_fetched_pdu(room_id, room_version, &event)?;
        // Already in the timeline: reuse the local copy.
        if let Ok(pdu) = timeline::get_pdu(&event_id) {
            auth_events.push(pdu);
            continue;
        }
        // Unknown event: run outlier processing and persist it.
        if !diesel_exists!(
            events::table
                .filter(events::id.eq(&event_id))
                .filter(events::room_id.eq(&room_id)),
            &mut connect()?
        )? {
            let Some(outlier_pdu) = process_to_outlier_pdu(
                remote_server,
                &event_id,
                room_id,
                room_version,
                event_value,
            )
            .await?
            else {
                // Event was dropped by outlier processing; skip it.
                continue;
            };
            let pdu = outlier_pdu.save_to_database(true)?.0;
            auth_events.push(pdu);
        }
    }
    Ok(auth_events)
}
/// Call /state_ids to find out what the state at this pdu is. We trust the
/// server's response to some extent, but we still do a lot of checks
/// on the events.
///
/// Fetches missing state/auth events for `event_id`: when at least 10% of
/// the desired events are missing locally, the full state is fetched in one
/// request; otherwise the missing events are fetched individually. Returns
/// the ids of events that still could not be fetched/processed.
pub(super) async fn fetch_and_process_missing_state_by_ids(
    remote_server: &ServerName,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    event_id: &EventId,
) -> AppResult<Vec<OwnedEventId>> {
    debug!("calling /state_ids");
    // Call /state_ids to find out what the state at this pdu is. We trust the server's
    // response to some extent, but we still do a lot of checks on the events
    let RoomStateIdsResBody {
        pdu_ids,
        auth_chain_ids,
    } = fetch_state_ids(remote_server, room_id, event_id).await?;
    debug!("fetching state events at event");
    // Everything we want locally: the state ids, the event itself, and its
    // auth chain.
    let mut desired_events = pdu_ids;
    desired_events.push(event_id.to_owned());
    desired_events.extend(auth_chain_ids.into_iter());
    let desired_count = desired_events.len();
    let mut failed_missing_events = Vec::new();
    let seen_events = seen_event_ids(room_id, &desired_events)?;
    let missing_events: Vec<_> = desired_events
        .into_iter()
        .filter(|e| !seen_events.contains(e))
        .collect();
    // Same as synapse
    // Making an individual request for each of 1000s of events has a lot of
    // overhead. On the other hand, we don't really want to fetch all of the events
    // if we already have most of them.
    //
    // As an arbitrary heuristic, if we are missing at least 10% of the events
    // (len * 10 >= count), then we fetch the whole state.
    if missing_events.len() * 10 >= desired_count {
        debug!("requesting complete state from remote");
        // NOTE(review): the fetched full state is discarded here and no
        // failures are recorded for this branch — confirm this is intended.
        fetch_and_process_missing_state(remote_server, room_id, room_version, event_id).await?;
    } else {
        debug!("fetching {} events from remote", missing_events.len());
        let failed_events =
            fetch_and_process_events(remote_server, room_id, room_version, &missing_events).await?;
        if !failed_events.is_empty() {
            failed_missing_events.extend(failed_events);
        }
    }
    Ok(failed_missing_events)
}
/// Fetch the full room state at `event_id` from `origin` via `/state`.
///
/// Parses every returned PDU and builds two maps keyed by state-field id
/// (event type + state key): one for the state events proper, one for the
/// auth chain. Events without a `type` or `state_key` field are skipped.
/// NOTE(review): despite the name, this does not persist the fetched events;
/// it only parses them and returns the id mappings.
pub async fn fetch_and_process_missing_state(
    origin: &ServerName,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    event_id: &EventId,
) -> AppResult<FetchedState> {
    debug!("fetching state events at event: {event_id}");
    let request = room_state_request(
        &origin.origin().await,
        RoomStateReqArgs {
            room_id: room_id.to_owned(),
            event_id: event_id.to_owned(),
        },
    )?
    .into_inner();
    let res_body = send_federation_request(origin, request, None)
        .await?
        .json::<RoomStateResBody>()
        .await?;
    let mut state_events: IndexMap<i64, OwnedEventId> = IndexMap::new();
    let mut auth_events: IndexMap<i64, OwnedEventId> = IndexMap::new();
    // The state list and the auth chain are indexed identically, so walk both
    // with the same loop body instead of duplicating it.
    for (pdus, map) in [
        (&res_body.pdus, &mut state_events),
        (&res_body.auth_chain, &mut auth_events),
    ] {
        for pdu in pdus {
            let (event_id, event_val) = parse_fetched_pdu(room_id, room_version, pdu)?;
            // Events without type/state_key cannot be indexed; skip them.
            let event_type = match event_val.get("type") {
                Some(v) => v.as_str().unwrap_or(""),
                None => continue,
            };
            let state_key = match event_val.get("state_key") {
                Some(v) => v.as_str().unwrap_or(""),
                None => continue,
            };
            let field_id = ensure_field_id(&event_type.into(), state_key)?;
            map.insert(field_id, event_id);
        }
    }
    Ok(FetchedState {
        state_events,
        auth_events,
    })
}
/// Ask `remote_server` for the state and auth-chain event ids at `event_id`
/// via `/state_ids`, returning the raw response body.
pub async fn fetch_state_ids(
    remote_server: &ServerName,
    room_id: &RoomId,
    event_id: &EventId,
) -> AppResult<RoomStateIdsResBody> {
    debug!("calling /state_ids");
    // We only trust the remote's answer as a list of ids; the events
    // themselves are validated when they are actually fetched and processed.
    let args = RoomStateAtEventReqArgs {
        room_id: room_id.to_owned(),
        event_id: event_id.to_owned(),
    };
    let request = room_state_ids_request(&remote_server.origin().await, args)?.into_inner();
    let response = send_federation_request(remote_server, request, None).await?;
    let body = response.json::<RoomStateIdsResBody>().await?;
    debug!("fetching state events at event: {event_id}");
    Ok(body)
}
/// Fetch a single event from `remote_server` over federation (`/event`).
pub async fn fetch_event(
    remote_server: &ServerName,
    event_id: &EventId,
) -> AppResult<EventResBody> {
    let args = EventReqArgs::new(event_id);
    let request = event_request(&remote_server.origin().await, args)?.into_inner();
    let response = crate::sending::send_federation_request(remote_server, request, None).await?;
    Ok(response.json::<EventResBody>().await?)
}
/// Fetch and process each event in `event_ids` from `remote_server`.
///
/// Failures are logged and collected rather than aborting the batch; the
/// returned set contains the ids of events that could not be fetched or
/// processed.
pub async fn fetch_and_process_events(
    remote_server: &ServerName,
    room_id: &RoomId,
    room_version_id: &RoomVersionId,
    event_ids: &[OwnedEventId],
) -> AppResult<HashSet<OwnedEventId>> {
    // Collect failures directly instead of building a "done" list and
    // re-scanning it with `Vec::contains` (which was O(n*m)).
    let mut failed_ids = HashSet::new();
    for event_id in event_ids {
        if let Err(e) =
            fetch_and_process_event(remote_server, room_id, room_version_id, event_id).await
        {
            error!("failed to fetch/process event {event_id} : {e}");
            failed_ids.insert(event_id.clone());
        }
    }
    Ok(failed_ids)
}
/// Fetch one event from `remote_server` and persist it as an outlier.
///
/// If outlier processing drops the event, it is silently ignored and
/// `Ok(())` is returned.
pub async fn fetch_and_process_event(
    remote_server: &ServerName,
    room_id: &RoomId,
    room_version_id: &RoomVersionId,
    event_id: &EventId,
) -> AppResult<()> {
    let res_body = fetch_event(remote_server, event_id).await?;
    let json = serde_json::from_str(res_body.pdu.get())?;
    match process_to_outlier_pdu(remote_server, event_id, room_id, room_version_id, json).await? {
        Some(outlier_pdu) => {
            outlier_pdu.save_to_database(true)?;
            Ok(())
        }
        None => Ok(()),
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/event/outlier.rs | crates/server/src/event/outlier.rs | use std::ops::{Deref, DerefMut};
use diesel::prelude::*;
use crate::core::events::TimelineEventType;
use crate::core::identifiers::*;
use crate::core::serde::{CanonicalJsonObject, RawJsonValue};
use crate::core::state::{Event, StateError};
use crate::core::{self, Seqnum, UnixMillis};
use crate::data::room::{DbEventData, NewDbEvent};
use crate::data::{connect, diesel_exists, schema::*};
use crate::event::fetching::{
fetch_and_process_auth_chain, fetch_and_process_missing_events,
fetch_and_process_missing_state, fetch_and_process_missing_state_by_ids,
};
use crate::event::handler::auth_check;
use crate::event::resolver::resolve_state_at_incoming;
use crate::event::{PduEvent, SnPduEvent, ensure_event_sn};
use crate::room::state::update_backward_extremities;
use crate::room::timeline;
use crate::utils::SeqnumQueueGuard;
use crate::{AppError, AppResult, MatrixError, RoomMutexGuard};
/// A PDU received from federation that has been parsed but not yet accepted
/// into the room's timeline/state.
#[derive(Clone, Debug)]
pub struct OutlierPdu {
    // The parsed event itself.
    pub pdu: PduEvent,
    // Canonical JSON the event was parsed from; persisted verbatim.
    pub json_data: CanonicalJsonObject,
    // Soft-fail flag; set/cleared by the `process_*` pipeline below —
    // NOTE(review): exact semantics are defined by that pipeline.
    pub soft_failed: bool,
    // Server the event was received from / should be re-fetched from.
    pub remote_server: OwnedServerName,
    pub room_id: OwnedRoomId,
    pub room_version: RoomVersionId,
    // Already-assigned sequence number, if the event was persisted before.
    pub event_sn: Option<Seqnum>,
    // Auth/prev events of this PDU that are locally marked rejected.
    pub rejected_auth_events: Vec<OwnedEventId>,
    pub rejected_prev_events: Vec<OwnedEventId>,
}
// Cheap access to the wrapped `PduEvent`: `OutlierPdu` is used wherever a
// plain PDU is expected, so expose the inner event via AsRef/AsMut and
// Deref/DerefMut.
impl AsRef<PduEvent> for OutlierPdu {
    fn as_ref(&self) -> &PduEvent {
        &self.pdu
    }
}
impl AsMut<PduEvent> for OutlierPdu {
    fn as_mut(&mut self) -> &mut PduEvent {
        &mut self.pdu
    }
}
impl DerefMut for OutlierPdu {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.pdu
    }
}
impl Deref for OutlierPdu {
    type Target = PduEvent;
    fn deref(&self) -> &Self::Target {
        &self.pdu
    }
}
// State-resolution view of the event: every accessor delegates to the inner
// PDU (via the `Deref` fields above).
impl crate::core::state::Event for OutlierPdu {
    type Id = OwnedEventId;
    fn event_id(&self) -> &Self::Id {
        &self.event_id
    }
    fn room_id(&self) -> &RoomId {
        &self.room_id
    }
    fn sender(&self) -> &UserId {
        &self.sender
    }
    fn event_type(&self) -> &TimelineEventType {
        &self.event_ty
    }
    fn content(&self) -> &RawJsonValue {
        &self.content
    }
    fn origin_server_ts(&self) -> UnixMillis {
        self.origin_server_ts
    }
    fn state_key(&self) -> Option<&str> {
        self.state_key.as_deref()
    }
    fn prev_events(&self) -> &[Self::Id] {
        self.prev_events.deref()
    }
    fn auth_events(&self) -> &[Self::Id] {
        self.auth_events.deref()
    }
    fn redacts(&self) -> Option<&Self::Id> {
        self.redacts.as_ref()
    }
    // Rejection status comes from the inner PDU, not from `soft_failed`.
    fn rejected(&self) -> bool {
        self.pdu.rejected()
    }
}
impl OutlierPdu {
pub fn save_to_database(
self,
is_backfill: bool,
) -> AppResult<(SnPduEvent, CanonicalJsonObject, Option<SeqnumQueueGuard>)> {
let Self {
pdu,
json_data,
soft_failed,
room_id,
event_sn,
..
} = self;
if let Some(event_sn) = event_sn {
return Ok((
SnPduEvent {
pdu,
event_sn,
is_outlier: true,
soft_failed,
is_backfill,
},
json_data,
None,
));
}
let (event_sn, event_guard) = ensure_event_sn(&room_id, &pdu.event_id)?;
let mut db_event =
NewDbEvent::from_canonical_json(&pdu.event_id, event_sn, &json_data, is_backfill)?;
db_event.is_outlier = true;
db_event.soft_failed = soft_failed;
db_event.is_rejected = pdu.rejection_reason.is_some();
db_event.rejection_reason = pdu.rejection_reason.clone();
db_event.save()?;
DbEventData {
event_id: pdu.event_id.clone(),
event_sn,
room_id: pdu.room_id.clone(),
internal_metadata: None,
json_data: serde_json::to_value(&json_data)?,
format_version: None,
}
.save()?;
let pdu = SnPduEvent {
pdu,
event_sn,
is_outlier: true,
soft_failed,
is_backfill,
};
update_backward_extremities(&pdu)?;
Ok((pdu, json_data, event_guard))
}
pub async fn process_incoming(
mut self,
remote_server: &ServerName,
is_backfill: bool,
) -> AppResult<(SnPduEvent, CanonicalJsonObject, Option<SeqnumQueueGuard>)> {
if (!self.soft_failed && !self.rejected())
|| (self.rejected()
&& self.rejected_prev_events.is_empty()
&& self.rejected_auth_events.is_empty())
{
return self.save_to_database(is_backfill);
}
// Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
if let Err(e) = fetch_and_process_missing_events(
&self.remote_server,
&self.room_id,
&self.room_version,
&self,
is_backfill,
)
.await
{
if let AppError::Matrix(MatrixError { ref kind, .. }) = e {
if *kind == core::error::ErrorKind::BadJson {
self.rejection_reason = Some(format!("bad prev events: {}", e));
let state_lock = crate::room::lock_state(&self.room_id).await;
return self.save_to_database(is_backfill);
} else {
self.soft_failed = true;
}
} else {
self.soft_failed = true;
}
}
self.process_pulled(remote_server, is_backfill).await
}
fn any_auth_event_rejected(&self) -> AppResult<bool> {
let query = events::table
.filter(events::id.eq_any(&self.pdu.auth_events))
.filter(events::is_rejected.eq(true));
Ok(diesel_exists!(query, &mut connect()?)?)
}
fn any_prev_event_rejected(&self) -> AppResult<bool> {
let query = events::table
.filter(events::id.eq_any(&self.pdu.prev_events))
.filter(events::is_rejected.eq(true));
Ok(diesel_exists!(query, &mut connect()?)?)
}
pub async fn process_pulled(
mut self,
remote_server: &ServerName,
is_backfill: bool,
) -> AppResult<(SnPduEvent, CanonicalJsonObject, Option<SeqnumQueueGuard>)> {
let version_rules = crate::room::get_version_rules(&self.room_version)?;
if !self.soft_failed || self.rejected() {
return self.save_to_database(is_backfill);
}
if self.any_prev_event_rejected()? {
self.rejection_reason = Some("one or more prev events are rejected".to_string());
return self.save_to_database(is_backfill);
}
if self.any_auth_event_rejected()?
&& let Err(e) = fetch_and_process_auth_chain(
&self.remote_server,
&self.room_id,
&self.room_version,
&self.pdu.event_id,
)
.await
{
if let AppError::HttpStatus(_) = e {
self.soft_failed = true;
} else {
self.rejection_reason = Some("one or more auth events are rejected".to_string());
}
return self.save_to_database(is_backfill);
}
let (_prev_events, missing_prev_event_ids) =
timeline::get_may_missing_pdus(&self.room_id, &self.pdu.prev_events)?;
if !missing_prev_event_ids.is_empty() {
for event_id in &missing_prev_event_ids {
let missing_events = match fetch_and_process_missing_state_by_ids(
&self.remote_server,
&self.room_id,
&self.room_version,
event_id,
)
.await
{
Ok(missing_events) => {
self.soft_failed = !missing_events.is_empty();
missing_events
}
Err(e) => {
if let AppError::Matrix(MatrixError { ref kind, .. }) = e {
if *kind == core::error::ErrorKind::BadJson {
self.rejection_reason =
Some(format!("failed to bad prev events: {}", e));
} else {
self.soft_failed = true;
}
} else {
self.soft_failed = true;
}
vec![]
}
};
if !missing_events.is_empty() {
for event_id in &missing_events {
if let Err(e) = fetch_and_process_auth_chain(
&self.remote_server,
&self.room_id,
&self.room_version,
event_id,
)
.await
{
warn!("error fetching auth chain for {}: {}", event_id, e);
}
}
}
}
}
if self.pdu.rejection_reason.is_none() {
let state_at_incoming_event = if let Some(state_at_incoming_event) =
resolve_state_at_incoming(&self.pdu, &version_rules).await?
{
Some(state_at_incoming_event)
} else {
if missing_prev_event_ids.is_empty() {
fetch_and_process_missing_state(
&self.remote_server,
&self.room_id,
&self.room_version,
&self.pdu.event_id,
)
.await
.ok()
.map(|r| r.state_events)
} else {
None
}
};
if let Err(e) =
auth_check(&self.pdu, &version_rules, state_at_incoming_event.as_ref()).await
{
match e {
AppError::State(StateError::Forbidden(brief)) => {
self.pdu.rejection_reason = Some(brief);
}
_ => {
self.soft_failed = true;
}
}
} else {
self.soft_failed = false;
}
}
self.save_to_database(is_backfill)
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/media/remote.rs | crates/server/src/media/remote.rs | use std::time::{Duration, SystemTime, UNIX_EPOCH};
use diesel::prelude::*;
use salvo::Response;
use crate::core::federation::media::ContentReqArgs;
use crate::core::identifiers::*;
use crate::core::{Mxc, ServerName, UserId};
use crate::data::connect;
use crate::data::schema::*;
use crate::{AppError, AppResult, config, exts::*};
use super::{Dimension, FileMeta};
/// Proxy a remote media download into `res`.
///
/// Tries the `core::media` content endpoint first and falls back to the
/// `core::federation::media` endpoint when that request fails. The remote
/// response's headers, status and body stream are forwarded verbatim to the
/// client.
pub async fn fetch_remote_content(
    _mxc: &str,
    server_name: &ServerName,
    media_id: &str,
    res: &mut Response,
) -> AppResult<()> {
    let content_req = crate::core::media::content_request(
        &server_name.origin().await,
        crate::core::media::ContentReqArgs {
            server_name: server_name.to_owned(),
            media_id: media_id.to_owned(),
            timeout_ms: Duration::from_secs(20),
            allow_remote: true,
            allow_redirect: true,
        },
    )?
    .into_inner();
    let content_response = if let Ok(content_response) =
        crate::sending::send_federation_request(server_name, content_req, None).await
    {
        content_response
    } else {
        // First endpoint failed: retry via the federation media API.
        let content_req = crate::core::federation::media::content_request(
            &server_name.origin().await,
            ContentReqArgs {
                media_id: media_id.to_owned(),
                timeout_ms: Duration::from_secs(20),
            },
        )?
        .into_inner();
        crate::sending::send_federation_request(server_name, content_req, None).await?
    };
    // Forward the remote response as-is (headers, status, streamed body).
    *res.headers_mut() = content_response.headers().to_owned();
    res.status_code(content_response.status());
    res.stream(content_response.bytes_stream());
    Ok(())
}
/// Fetch a remote media thumbnail, preferring the authenticated fetch path
/// and falling back to the unauthenticated one on any error.
///
/// NOTE(review): both helpers are currently `unimplemented!()` stubs.
pub async fn fetch_remote_thumbnail(
    mxc: &Mxc<'_>,
    user: Option<&UserId>,
    server: Option<&ServerName>,
    timeout_ms: Duration,
    dim: &Dimension,
) -> AppResult<FileMeta> {
    // Reject blocklisted origin servers up front.
    check_fetch_authorized(mxc)?;
    match fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim).await {
        Err(_) => fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim).await,
        ok => ok,
    }
}
// pub async fn fetch_remote_content(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// server: Option<&ServerName>,
// timeout_ms: Duration,
// ) -> AppResult<FileMeta> {
// check_fetch_authorized(mxc)?;
// let result = fetch_content_authenticated(mxc, user, server, timeout_ms).await;
// if result.is_err() {
// return fetch_content_unauthenticated(mxc, user, server, timeout_ms).await;
// }
// result
// }
/// Fetch a thumbnail via the authenticated federation media API.
///
/// Stub: currently `unimplemented!()` — always panics when called. The
/// commented-out code below is the intended implementation, kept for
/// reference.
async fn fetch_thumbnail_authenticated(
    mxc: &Mxc<'_>,
    user: Option<&UserId>,
    server: Option<&ServerName>,
    timeout_ms: Duration,
    dim: &Dimension,
) -> AppResult<FileMeta> {
    unimplemented!()
    // use federation::authenticated_media::get_content_thumbnail::v1::{Request, Response};
    // let request = Request {
    //     media_id: mxc.media_id.into(),
    //     method: dim.method.clone().into(),
    //     width: dim.width.into(),
    //     height: dim.height.into(),
    //     animated: true.into(),
    //     timeout_ms,
    // };
    // let Response { content, .. } = self
    //     .federation_request(mxc, user, server, request)
    //     .await?;
    // match content {
    //     | FileOrLocation::File(content) =>
    //         self.handle_thumbnail_file(mxc, user, dim, content)
    //             .await,
    //     | FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await,
    // }
}
// async fn fetch_content_authenticated(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// server: Option<&ServerName>,
// timeout_ms: Duration,
// ) -> AppResult<FileMeta> {
// use federation::authenticated_media::get_content::v1::{Request, Response};
// let request = Request {
// media_id: mxc.media_id.into(),
// timeout_ms,
// };
// let Response { content, .. } = self
// .federation_request(mxc, user, server, request)
// .await?;
// match content {
// | FileOrLocation::File(content) => self.handle_content_file(mxc, user, content).await,
// | FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await,
// }
// }
/// Fetch a thumbnail via the unauthenticated (legacy) media API.
///
/// Stub: currently `unimplemented!()` — always panics when called. The
/// commented-out code below is the intended implementation, kept for
/// reference.
async fn fetch_thumbnail_unauthenticated(
    mxc: &Mxc<'_>,
    user: Option<&UserId>,
    server: Option<&ServerName>,
    timeout_ms: Duration,
    dim: &Dimension,
) -> AppResult<FileMeta> {
    unimplemented!()
    // use media::get_content_thumbnail::v3::{Request, Response};
    // let request = Request {
    //     allow_remote: true,
    //     allow_redirect: true,
    //     animated: true.into(),
    //     method: dim.method.clone().into(),
    //     width: dim.width.into(),
    //     height: dim.height.into(),
    //     server_name: mxc.server_name.into(),
    //     media_id: mxc.media_id.into(),
    //     timeout_ms,
    // };
    // let Response {
    //     file, content_type, content_disposition, ..
    // } = self
    //     .federation_request(mxc, user, server, request)
    //     .await?;
    // let content = Content { file, content_type, content_disposition };
    // handle_thumbnail_file(mxc, user, dim, content)
    //     .await
}
// async fn fetch_content_unauthenticated(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// server: Option<&ServerName>,
// timeout_ms: Duration,
// ) -> AppResult<FileMeta> {
// use media::get_content::v3::{Request, Response};
// let request = Request {
// allow_remote: true,
// allow_redirect: true,
// server_name: mxc.server_name.into(),
// media_id: mxc.media_id.into(),
// timeout_ms,
// };
// let Response {
// file, content_type, content_disposition, ..
// } = self
// .federation_request(mxc, user, server, request)
// .await?;
// let content = Content { file, content_type, content_disposition };
// handle_content_file(mxc, user, content).await
// }
// async fn handle_thumbnail_file(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// dim: &Dimension,
// content: Content,
// ) -> AppResult<FileMeta> {
// let content_disposition = make_content_disposition(
// content.content_disposition,
// content.content_type.as_deref(),
// None,
// );
// crate::media::save_thumbnail(
// mxc,
// user,
// content.content_type.as_deref(),
// Some(&content_disposition),
// dim,
// &content.file,
// )
// .await
// .map(|()| FileMeta {
// content: Some(content.file),
// content_type: content.content_type.map(Into::into),
// content_disposition: Some(content_disposition),
// })
// }
// async fn handle_content_file(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// content: Content,
// ) -> AppResult<FileMeta> {
// let content_disposition = make_content_disposition(
// content.content_disposition.as_ref(),
// content.content_type.as_deref(),
// None,
// );
// create(
// mxc,
// user,
// Some(&content_disposition),
// content.content_type.as_deref(),
// &content.file,
// )
// .await
// .map(|()| FileMeta {
// content: Some(content.file),
// content_type: content.content_type.map(Into::into),
// content_disposition: Some(content_disposition),
// })
// }
// async fn handle_location(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// location: &str,
// ) -> AppResult<FileMeta> {
// location_request(location)
// .await
// .map_err(|error| AppError::public("fetching media from location failed"))
// }
// async fn location_request(location: &str) -> AppResult<FileMeta> {
// let response = self
// .services
// .client
// .extern_media
// .get(location)
// .send()
// .await?;
// let content_type = response
// .headers()
// .get(CONTENT_TYPE)
// .map(HeaderValue::to_str)
// .and_then(Result::ok)
// .map(str::to_owned);
// let content_disposition = response
// .headers()
// .get(CONTENT_DISPOSITION)
// .map(HeaderValue::as_bytes)
// .map(TryFrom::try_from)
// .and_then(Result::ok);
// response
// .bytes()
// .await
// .map(Vec::from)
// .map_err(Into::into)
// .map(|content| FileMeta {
// content: Some(content),
// content_type: content_type.clone(),
// content_disposition: Some(make_content_disposition(
// content_disposition.as_ref(),
// content_type.as_deref(),
// None,
// )),
// })
// }
// async fn federation_request<Request>(
// mxc: &Mxc<'_>,
// user: Option<&UserId>,
// server: Option<&ServerName>,
// request: Request,
// ) -> Result<Request::IncomingResponse>
// where
// Request: OutgoingRequest + Send + Debug,
// {
// unimplemented!()
// self.services
// .sending
// .send_federation_request(server.unwrap_or(mxc.server_name), request)
// .await
// }
// pub async fn fetch_remote_thumbnail_legacy(
// body: &media::get_content_thumbnail::v3::Request,
// ) -> AppResult<media::get_content_thumbnail::v3::Response> {
// unimplemented!()
// let mxc = Mxc {
// server_name: &body.server_name,
// media_id: &body.media_id,
// };
// self.check_legacy_freeze()?;
// self.check_fetch_authorized(&mxc)?;
// let response = self
// .services
// .sending
// .send_federation_request(mxc.server_name, media::get_content_thumbnail::v3::Request {
// allow_remote: body.allow_remote,
// height: body.height,
// width: body.width,
// method: body.method.clone(),
// server_name: body.server_name.clone(),
// media_id: body.media_id.clone(),
// timeout_ms: body.timeout_ms,
// allow_redirect: body.allow_redirect,
// animated: body.animated,
// })
// .await?;
// let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
// self.upload_thumbnail(
// &mxc,
// None,
// None,
// response.content_type.as_deref(),
// &dim,
// &response.file,
// )
// .await?;
// Ok(response)
// }
// pub async fn fetch_remote_content_legacy(
// mxc: &Mxc<'_>,
// allow_redirect: bool,
// timeout_ms: Duration,
// ) -> AppResult<media::get_content::v3::Response> {
// unimplemented!()
// self.check_legacy_freeze()?;
// self.check_fetch_authorized(mxc)?;
// let response = self
// .services
// .sending
// .send_federation_request(mxc.server_name, media::get_content::v3::Request {
// allow_remote: true,
// server_name: mxc.server_name.into(),
// media_id: mxc.media_id.into(),
// timeout_ms,
// allow_redirect,
// })
// .await?;
// let content_disposition = make_content_disposition(
// response.content_disposition.as_ref(),
// response.content_type.as_deref(),
// None,
// );
// create(
// mxc,
// None,
// Some(&content_disposition),
// response.content_type.as_deref(),
// &response.file,
// )
// .await?;
// Ok(response)
// }
/// Deny media fetches from servers on the configured blocklists.
///
/// Returns a generic "Media not found." error so the requesting client
/// cannot distinguish a blocked server from genuinely missing media.
fn check_fetch_authorized(mxc: &Mxc<'_>) -> AppResult<()> {
    let conf = config::get();
    let host = mxc.server_name.host();
    let blocked = conf.media.prevent_downloads_from.is_match(host)
        || conf.forbidden_remote_server_names.is_match(host);
    if !blocked {
        return Ok(());
    }
    // we'll lie to the client and say the blocked server's media was not found and
    // log. the client has no way of telling anyways so this is a security bonus.
    warn!(%mxc, "Received request for media on blocklisted server");
    Err(AppError::public("Media not found."))
}
// fn check_legacy_freeze() -> AppResult<()> {
// unimplemented!()
// self.services
// .server
// .config
// .freeze_legacy_media
// .then_some(())
// .ok_or(err!(Request(NotFound("Remote media is frozen."))))
// }
/// Delete locally cached *remote* media filtered by creation time.
///
/// Exactly one of `before`/`after` must be set: `before` removes remote
/// media created before `time`, `after` removes remote media created after
/// it. On-disk files are only removed when
/// `yes_i_want_to_delete_local_media` is set (DB metadata is always
/// removed). Returns the number of media items successfully deleted;
/// per-item failures are logged and skipped.
pub async fn delete_past_remote_media(
    time: SystemTime,
    before: bool,
    after: bool,
    yes_i_want_to_delete_local_media: bool,
) -> AppResult<u64> {
    if before && after {
        return Err(AppError::public(
            "Please only pick one argument, --before or --after.",
        ));
    }
    if !(before || after) {
        return Err(AppError::public(
            "Please pick one argument, --before or --after.",
        ));
    }
    let time = time.duration_since(UNIX_EPOCH)?.as_millis();
    // BUGFIX: both branches must select *remote* media (origin differs from
    // our server name) — the previous `before` branch matched local media —
    // and the time comparison must follow the flag (`after` => created later
    // than `time`, `before` => created earlier). Both were inverted.
    let mxcs = if after {
        media_metadatas::table
            .filter(media_metadatas::origin_server.ne(config::server_name()))
            .filter(media_metadatas::created_at.gt(time as i64))
            .select((media_metadatas::origin_server, media_metadatas::media_id))
            .load::<(OwnedServerName, String)>(&mut connect()?)?
    } else {
        media_metadatas::table
            .filter(media_metadatas::origin_server.ne(config::server_name()))
            .filter(media_metadatas::created_at.lt(time as i64))
            .select((media_metadatas::origin_server, media_metadatas::media_id))
            .load::<(OwnedServerName, String)>(&mut connect()?)?
    };
    let mut count = 0;
    for (origin_server, media_id) in &mxcs {
        let mxc = OwnedMxcUri::from(format!("mxc://{origin_server}/{media_id}"));
        if let Err(e) =
            delete_remote_media(origin_server, media_id, yes_i_want_to_delete_local_media).await
        {
            warn!("failed to delete remote media {mxc}: {e}");
        } else {
            count += 1;
        }
    }
    Ok(count)
}
/// Remove a media item's DB metadata and, when requested, its on-disk file.
///
/// The metadata row is always deleted; the file is only removed when
/// `yes_i_want_to_delete_local_media` is set. File-removal failures are
/// logged, not propagated.
pub async fn delete_remote_media(
    server_name: &ServerName,
    media_id: &str,
    yes_i_want_to_delete_local_media: bool,
) -> AppResult<()> {
    crate::data::media::delete_media(server_name, media_id)?;
    if yes_i_want_to_delete_local_media {
        let path = crate::media::get_media_path(server_name, media_id);
        if let Err(e) = std::fs::remove_file(&path) {
            warn!("failed to delete local media file {path:?}: {e}");
        }
    }
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/media/preview.rs | crates/server/src/media/preview.rs | use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, LazyLock, OnceLock};
use std::time::Duration;
use ipaddress::IPAddress;
use serde::Serialize;
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use url::Url;
use crate::core::identifiers::*;
use crate::core::{MatrixError, UnixMillis};
use crate::data::media::{DbUrlPreview, NewDbMetadata, NewDbUrlPreview};
use crate::{AppResult, config, data, utils};
static URL_PREVIEW_MUTEX: LazyLock<Mutex<HashMap<String, Arc<Mutex<()>>>>> =
LazyLock::new(Default::default);
async fn get_url_preview_mutex(url: &str) -> Arc<Mutex<()>> {
let mut locks = URL_PREVIEW_MUTEX.lock().await;
locks
.entry(url.to_string())
.or_insert_with(|| Arc::new(Mutex::new(())))
.clone()
}
/// Lazily-initialized shared HTTP client used for fetching preview targets
/// (20-second request timeout, "Palpo" user agent).
fn client() -> &'static reqwest::Client {
    static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
    CLIENT.get_or_init(|| {
        reqwest::ClientBuilder::new()
            .timeout(Duration::from_secs(20))
            .user_agent("Palpo")
            .build()
            .expect("Failed to create reqwest client")
    })
}
/// OpenGraph-style URL preview data, serialized with the `og:*` /
/// `matrix:image:size` field names; `None` fields are omitted.
#[derive(Serialize, Default, Clone, Debug)]
pub struct UrlPreviewData {
    // Canonical URL of the previewed page.
    #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:url"))]
    pub og_url: Option<String>,
    // Page title.
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename(serialize = "og:title")
    )]
    pub og_title: Option<String>,
    // OpenGraph object type (e.g. "website").
    #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:type"))]
    pub og_type: Option<String>,
    // Short page description.
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename(serialize = "og:description")
    )]
    pub og_description: Option<String>,
    // Preview image reference.
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename(serialize = "og:image")
    )]
    pub og_image: Option<String>,
    // Image size in bytes (Matrix extension field).
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename(serialize = "matrix:image:size")
    )]
    pub image_size: Option<u64>,
    // Image dimensions in pixels.
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename(serialize = "og:image:width")
    )]
    pub og_image_width: Option<u32>,
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename(serialize = "og:image:height")
    )]
    pub og_image_height: Option<u32>,
}
impl UrlPreviewData {
    /// Convert this preview into a row ready for insertion, keyed by `url`.
    /// Sizes/dimensions are narrowed to the DB's signed column types and the
    /// creation timestamp is set to now.
    pub fn into_new_db_url_preview(self, url: impl Into<String>) -> NewDbUrlPreview {
        NewDbUrlPreview {
            url: url.into(),
            og_title: self.og_title,
            og_type: self.og_type,
            og_url: self.og_url,
            og_description: self.og_description,
            og_image: self.og_image,
            image_size: self.image_size.map(|s| s as i64),
            og_image_width: self.og_image_width.map(|w| w as i32),
            og_image_height: self.og_image_height.map(|h| h as i32),
            created_at: UnixMillis::now(),
        }
    }
}
impl From<DbUrlPreview> for UrlPreviewData {
fn from(preview: DbUrlPreview) -> Self {
let DbUrlPreview {
og_title,
og_type,
og_url,
og_description,
og_image,
image_size,
og_image_width,
og_image_height,
..
} = preview;
Self {
og_title,
og_type,
og_url,
og_description,
og_image,
image_size: image_size.map(|s| s as u64),
og_image_width: og_image_width.map(|w| w as u32),
og_image_height: og_image_height.map(|h| h as u32),
}
}
}
/// Decide whether a URL preview may be generated for `url`, based on the
/// configured allow/deny lists.
///
/// Checks, in order: scheme must be http/https; a literal `"*"` in any
/// allowlist allows everything; then the host is checked against the
/// explicit denylist, explicit allowlist, domain-substring allowlist and
/// URL-substring allowlist; finally (when `check_root_domain` is enabled)
/// the same lists are consulted for the host's root domain. Anything not
/// explicitly allowed is denied.
pub fn url_preview_allowed(url: &Url) -> bool {
    if ["http", "https"]
        .iter()
        .all(|&scheme| scheme != url.scheme().to_lowercase())
    {
        debug!("Ignoring non-HTTP/HTTPS URL to preview: {}", url);
        return false;
    }
    let host = match url.host_str() {
        None => {
            debug!(
                "Ignoring URL preview for a URL that does not have a host (?): {}",
                url
            );
            return false;
        }
        Some(h) => h.to_owned(),
    };
    let conf = crate::config::get();
    let allowlist_domain_contains = &conf.url_preview.domain_contains_allowlist;
    let allowlist_domain_explicit = &conf.url_preview.domain_explicit_allowlist;
    let denylist_domain_explicit = &conf.url_preview.domain_explicit_denylist;
    let allowlist_url_contains = &conf.url_preview.url_contains_allowlist;
    // A "*" wildcard in any allowlist short-circuits everything.
    if allowlist_domain_contains.contains(&"*".to_owned())
        || allowlist_domain_explicit.contains(&"*".to_owned())
        || allowlist_url_contains.contains(&"*".to_owned())
    {
        debug!(
            "Config key contains * which is allowing all URL previews. Allowing URL {}",
            url
        );
        return true;
    }
    if !host.is_empty() {
        // Denylist wins over every allowlist.
        if denylist_domain_explicit.contains(&host) {
            debug!(
                "Host {} is not allowed by url_preview_domain_explicit_denylist (check 1/4)",
                &host
            );
            return false;
        }
        if allowlist_domain_explicit.contains(&host) {
            debug!(
                "Host {} is allowed by url_preview_domain_explicit_allowlist (check 2/4)",
                &host
            );
            return true;
        }
        if allowlist_domain_contains
            .iter()
            .any(|domain_s| domain_s.contains(&host))
        {
            debug!(
                "Host {} is allowed by url_preview_domain_contains_allowlist (check 3/4)",
                &host
            );
            return true;
        }
        if allowlist_url_contains
            .iter()
            .any(|url_s| url.to_string().contains(url_s))
        {
            debug!(
                "URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)",
                &host
            );
            return true;
        }
        // check root domain if available and if user has root domain checks
        if conf.url_preview.check_root_domain {
            debug!("Checking root domain");
            match host.split_once('.') {
                None => return false,
                Some((_, root_domain)) => {
                    if denylist_domain_explicit.contains(&root_domain.to_owned()) {
                        debug!(
                            "Root domain {} is not allowed by \
                             url_preview_domain_explicit_denylist (check 1/3)",
                            &root_domain
                        );
                        // BUGFIX: a denylisted root domain must be rejected;
                        // this previously returned `true`, allowing previews
                        // for explicitly denied root domains.
                        return false;
                    }
                    if allowlist_domain_explicit.contains(&root_domain.to_owned()) {
                        debug!(
                            "Root domain {} is allowed by url_preview_domain_explicit_allowlist \
                             (check 2/3)",
                            &root_domain
                        );
                        return true;
                    }
                    if allowlist_domain_contains
                        .iter()
                        .any(|domain_s| domain_s.contains(&root_domain.to_owned()))
                    {
                        debug!(
                            "Root domain {} is allowed by url_preview_domain_contains_allowlist \
                             (check 3/3)",
                            &root_domain
                        );
                        return true;
                    }
                }
            }
        }
    }
    false
}
/// Returns a preview for `url`, serving the cached copy when available and
/// otherwise fetching it while holding a per-URL lock.
pub async fn get_url_preview(url: &Url) -> AppResult<UrlPreviewData> {
    // Fast path: serve a previously stored preview.
    if let Ok(cached) = data::media::get_url_preview(url.as_str()) {
        return Ok(cached.into());
    }
    // Serialize fetches per URL so concurrent callers don't spider it twice.
    let mutex = get_url_preview_mutex(url.as_str()).await;
    let _guard = mutex.lock().await;
    // Re-check under the lock: another task may have stored it meanwhile.
    if let Ok(cached) = data::media::get_url_preview(url.as_str()) {
        return Ok(cached.into());
    }
    request_url_preview(url).await
}
/// Fetches a fresh preview for `url`, delegating to the HTML or image
/// handler based on the response Content-Type, and caches the result.
///
/// Both the URL's hostname (when it parses as an IP) and the actual remote
/// address of the response are checked against the allowed CIDR ranges, to
/// keep previews from reaching forbidden networks.
async fn request_url_preview(url: &Url) -> AppResult<UrlPreviewData> {
    let client = client();
    // Reject targets whose hostname parses as an IP in a forbidden range.
    // `expect`: callers validate the URL (and thus its host) beforehand.
    if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated"))
        && !config::valid_cidr_range(&ip)
    {
        return Err(
            MatrixError::forbidden("Requesting from this address is forbidden.", None).into(),
        );
    }
    let response = client.get(url.clone()).send().await?;
    debug!(
        ?url,
        "URL preview response headers: {:?}",
        response.headers()
    );
    // Also verify the address we actually connected to (the resolved peer
    // can differ from what the hostname suggested).
    if let Some(remote_addr) = response.remote_addr() {
        debug!(
            ?url,
            "URL preview response remote address: {:?}", remote_addr
        );
        if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string())
            && !config::valid_cidr_range(&ip)
        {
            return Err(
                MatrixError::forbidden("Requesting from this address is forbidden.", None).into(),
            );
        }
    }
    let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) else {
        return Err(MatrixError::unknown("Unknown or invalid Content-Type header").into());
    };
    let content_type = content_type.to_str().map_err(|e| {
        MatrixError::unknown(format!("Unknown or invalid Content-Type header: {e}"))
    })?;
    // Dispatch on media type; anything but HTML or images is unsupported.
    let data = match content_type {
        html if html.starts_with("text/html") => download_html(url).await?,
        img if img.starts_with("image/") => download_image(url).await?,
        _ => return Err(MatrixError::unknown("Unsupported Content-Type").into()),
    };
    // Persist so subsequent calls for this URL hit the cache.
    crate::data::media::set_url_preview(&data.clone().into_new_db_url_preview(url.as_str()))?;
    Ok(data)
}
/// Downloads an image for a URL preview, stores it as local media, and
/// returns preview data referencing the newly minted MXC URI.
///
/// The file is only written (and its metadata row inserted) when it does not
/// already exist on disk; image dimensions are probed from the bytes and
/// simply omitted on failure.
async fn download_image(url: &Url) -> AppResult<UrlPreviewData> {
    use image::ImageReader;
    let conf = crate::config::get();
    let image = client().get(url.to_owned()).send().await?;
    // Remember the response Content-Type (if any) for the metadata row.
    let content_type = image.headers().get(reqwest::header::CONTENT_TYPE);
    let content_type = content_type
        .and_then(|ct| ct.to_str().ok())
        .map(|c| c.to_owned());
    let image = image.bytes().await?;
    // Mint a fresh MXC URI on our own server for the downloaded bytes.
    let mxc = Mxc {
        server_name: &conf.server_name,
        media_id: &utils::random_string(crate::MXC_LENGTH),
    };
    let dest_path = crate::media::get_media_path(&conf.server_name, mxc.media_id);
    let dest_path = Path::new(&dest_path);
    if !dest_path.exists() {
        let parent_dir = utils::fs::get_parent_dir(dest_path);
        std::fs::create_dir_all(&parent_dir)?;
        let mut file = tokio::fs::File::create(dest_path).await?;
        file.write_all(&image).await?;
        let metadata = NewDbMetadata {
            media_id: mxc.media_id.to_string(),
            origin_server: conf.server_name.clone(),
            disposition_type: Some("inline".into()),
            content_type,
            file_name: None,
            file_extension: None,
            file_size: image.len() as i64,
            file_hash: None,
            created_by: None,
            created_at: UnixMillis::now(),
        };
        crate::data::media::insert_metadata(&metadata)?;
    }
    // Probe width/height from the bytes; any failure leaves them unset.
    let cursor = std::io::Cursor::new(&image);
    let (width, height) = match ImageReader::new(cursor).with_guessed_format() {
        Err(_) => (None, None),
        Ok(reader) => match reader.into_dimensions() {
            Err(_) => (None, None),
            Ok((width, height)) => (Some(width), Some(height)),
        },
    };
    Ok(UrlPreviewData {
        og_image: Some(mxc.to_string()),
        image_size: Some(image.len() as u64),
        og_image_width: width,
        og_image_height: height,
        ..Default::default()
    })
}
/// Spiders an HTML page (up to the configured size cap), extracts OpenGraph
/// metadata, and downloads the first OpenGraph image, if any.
async fn download_html(url: &Url) -> AppResult<UrlPreviewData> {
    use webpage::HTML;
    let conf = crate::config::get();
    // Stream the body, stopping once the configured spider cap is exceeded.
    let mut response = client().get(url.to_owned()).send().await?;
    let mut body_bytes: Vec<u8> = Vec::new();
    while let Some(chunk) = response.chunk().await? {
        body_bytes.extend_from_slice(&chunk);
        if body_bytes.len() > conf.url_preview.max_spider_size {
            debug!(
                "Response body from URL {} exceeds url_preview.max_spider_size ({}), not \
                 processing the rest of the response body and assuming our necessary data is in \
                 this range.",
                url, conf.url_preview.max_spider_size
            );
            break;
        }
    }
    let body = String::from_utf8_lossy(&body_bytes).to_string();
    let html = match HTML::from_string(body, Some(url.to_string())) {
        Ok(html) => html,
        Err(_) => return Err(MatrixError::unknown("Failed to parse HTML").into()),
    };
    // Seed the preview from the first OpenGraph image, if one is declared.
    let mut data = if let Some(image) = html.opengraph.images.first() {
        download_image(&url.join(&image.url)?).await?
    } else {
        UrlPreviewData::default()
    };
    data.og_type = Some(html.opengraph.og_type);
    // Prefer OpenGraph properties, falling back to plain HTML metadata.
    let props = html.opengraph.properties;
    data.og_url = props.get("url").cloned().or_else(|| Some(url.to_string()));
    data.og_title = props.get("title").cloned().or(html.title);
    data.og_description = props.get("description").cloned().or(html.description);
    Ok(data)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/server_key/verify.rs | crates/server/src/server_key/verify.rs | use serde_json::value::RawValue as RawJsonValue;
use super::get_event_keys;
use crate::core::identifiers::*;
use crate::core::serde::canonical_json::{CanonicalJsonObject, CanonicalJsonValue};
use crate::core::signatures::{self, Verified};
use crate::event::gen_event_id_canonical_json;
use crate::{AppError, AppResult};
/// Computes the event ID for a raw PDU, verifies its signatures/hashes, and
/// returns the canonical JSON with the `event_id` field filled in.
pub async fn validate_and_add_event_id(
    pdu: &RawJsonValue,
    room_version: &RoomVersionId,
) -> AppResult<(OwnedEventId, CanonicalJsonObject)> {
    // Derive the event ID from the canonical JSON form of the PDU.
    let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?;
    // Reject events whose signatures or hashes do not verify.
    verify_event(&value, room_version)
        .await
        .map_err(|e| AppError::public(format!("Event {event_id} failed verification: {e:?}")))?;
    value.insert(
        "event_id".into(),
        CanonicalJsonValue::String(event_id.as_str().into()),
    );
    Ok((event_id, value))
}
/// Same contract as [`validate_and_add_event_id`].
///
/// NOTE(review): the body is currently identical to
/// `validate_and_add_event_id` — the "no fetch" behavior (verifying with only
/// locally cached keys) does not appear to be implemented yet; confirm.
pub async fn validate_and_add_event_id_no_fetch(
    pdu: &RawJsonValue,
    room_version: &RoomVersionId,
) -> AppResult<(OwnedEventId, CanonicalJsonObject)> {
    let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?;
    verify_event(&value, room_version)
        .await
        .map_err(|e| AppError::public(format!("Event {event_id} failed verification: {e:?}")))?;
    value.insert(
        "event_id".into(),
        CanonicalJsonValue::String(event_id.as_str().into()),
    );
    Ok((event_id, value))
}
/// Verifies an event's signatures against the keys referenced by its
/// `signatures` object, fetching those keys first if necessary.
pub async fn verify_event(
    event: &CanonicalJsonObject,
    room_version: &RoomVersionId,
) -> AppResult<Verified> {
    // Resolve the signing rules for this room version, gather the referenced
    // keys, then delegate to the core signature check.
    let rules = crate::room::get_version_rules(room_version)?;
    let keys = get_event_keys(event, &rules).await?;
    Ok(signatures::verify_event(&keys, event, &rules)?)
}
/// Verifies the signatures of an arbitrary canonical-JSON object using the
/// keys referenced by its `signatures` object.
pub async fn verify_json(
    event: &CanonicalJsonObject,
    room_version: &RoomVersionId,
) -> AppResult<()> {
    let rules = crate::room::get_version_rules(room_version)?;
    let keys = get_event_keys(event, &rules).await?;
    Ok(signatures::verify_json(&keys, event)?)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/server_key/acquire.rs | crates/server/src/server_key/acquire.rs | use std::{
borrow::Borrow,
collections::{BTreeMap, BTreeSet},
time::Duration,
};
use futures_util::{StreamExt, stream::FuturesUnordered};
use tokio::time::{Instant, timeout_at};
use super::{
add_signing_keys, batch_notary_request, key_exists, server_request, verify_key_exists,
};
use crate::config;
use crate::core::federation::discovery::ServerSigningKeys;
use crate::core::serde::{CanonicalJsonObject, RawJson, RawJsonValue};
use crate::core::{OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId};
type Batch = BTreeMap<OwnedServerName, Vec<OwnedServerSigningKeyId>>;
/// Ensures the public signing keys referenced by the `signatures` objects of
/// the given raw events are available locally, fetching any that are missing.
///
/// Events that fail to parse, or that carry no `signatures` field, are
/// silently skipped.
pub async fn acquire_events_pubkeys<'a, I>(events: I)
where
    I: Iterator<Item = &'a Box<RawJsonValue>> + Send,
{
    // Local aliases; this `Batch` (BTreeSet values) intentionally shadows the
    // module-level `Batch` (Vec values) to deduplicate key IDs while
    // collecting.
    type Batch = BTreeMap<OwnedServerName, BTreeSet<OwnedServerSigningKeyId>>;
    type Signatures = BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, String>>;
    let mut batch = Batch::new();
    // Collect every (server, key id) pair appearing in any event signature.
    events
        .cloned()
        .map(RawJson::<CanonicalJsonObject>::from_raw_value)
        .map(|event| event.get_field::<Signatures>("signatures"))
        .filter_map(|v| v.ok().flatten())
        .flat_map(IntoIterator::into_iter)
        .for_each(|(server, sigs)| {
            batch.entry(server).or_default().extend(sigs.into_keys());
        });
    // Re-borrow into the (&ServerName, impl Iterator<&KeyId>) shape expected
    // by `acquire_pubkeys`.
    let batch = batch
        .iter()
        .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow)));
    acquire_pubkeys(batch).await;
}
/// Acquires the requested signing keys, trying local storage first and then
/// the network according to the notary-related configuration.
///
/// Keys already stored locally are skipped. Depending on configuration the
/// trusted notaries are queried before or after the key origins themselves
/// (`query_trusted_key_servers_first*`), and origins are skipped entirely
/// when `only_query_trusted_key_servers` is set. Failures are logged rather
/// than returned.
pub async fn acquire_pubkeys<'a, S, K>(batch: S)
where
    S: Iterator<Item = (&'a ServerName, K)> + Send + Clone,
    K: Iterator<Item = &'a ServerSigningKeyId> + Send + Clone,
{
    let notary_only = crate::config::get().only_query_trusted_key_servers;
    let notary_first_always = crate::config::get().query_trusted_key_servers_first;
    let notary_first_on_join = crate::config::get().query_trusted_key_servers_first_on_join;
    // Counts are only used for logging.
    let requested_servers = batch.clone().count();
    let requested_keys = batch.clone().flat_map(|(_, key_ids)| key_ids).count();
    debug!("acquire {requested_keys} keys from {requested_servers}");
    // Drop everything we already hold locally.
    let mut missing = acquire_locals(batch).await;
    let mut missing_keys = keys_count(&missing);
    let mut missing_servers = missing.len();
    if missing_servers == 0 {
        return;
    }
    info!("{missing_keys} keys for {missing_servers} servers will be acquired");
    // NOTE(review): `notary_first_on_join` is honored on every call here, not
    // only for room joins — confirm whether that is intended.
    if notary_first_always || notary_first_on_join {
        missing = acquire_notary(missing.into_iter()).await;
        missing_keys = keys_count(&missing);
        missing_servers = missing.len();
        if missing_keys == 0 {
            return;
        }
        warn!("missing {missing_keys} keys for {missing_servers} servers from all notaries first");
    }
    if !notary_only {
        // Ask each origin server directly for its own keys.
        missing = acquire_origins(missing.into_iter()).await;
        missing_keys = keys_count(&missing);
        missing_servers = missing.len();
        if missing_keys == 0 {
            return;
        }
        tracing::warn!("missing {missing_keys} keys for {missing_servers} servers unreachable");
    }
    if !notary_first_always && !notary_first_on_join {
        // Notaries act as the fallback when they were not tried first.
        missing = acquire_notary(missing.into_iter()).await;
        missing_keys = keys_count(&missing);
        missing_servers = missing.len();
        if missing_keys == 0 {
            return;
        }
        tracing::warn!(
            "still missing {missing_keys} keys for {missing_servers} servers from all notaries."
        );
    }
    if missing_keys > 0 {
        warn!(
            "did not obtain {missing_keys} keys for {missing_servers} servers out of {requested_keys} total keys for \
            {requested_servers} total servers."
        );
    }
    for (server, key_ids) in missing {
        tracing::warn!(?server, ?key_ids, "missing");
    }
}
/// Filters the batch down to the keys that are not already stored locally.
///
/// A server only appears in the returned map when at least one of its keys
/// is missing; lookup errors are treated as "not present".
async fn acquire_locals<'a, S, K>(batch: S) -> Batch
where
    S: Iterator<Item = (&'a ServerName, K)> + Send,
    K: Iterator<Item = &'a ServerSigningKeyId> + Send,
{
    let mut missing = Batch::new();
    for (server, key_ids) in batch {
        for key_id in key_ids {
            // Already present locally — nothing to fetch for this key.
            if verify_key_exists(server, key_id).unwrap_or(false) {
                continue;
            }
            missing
                .entry(server.into())
                .or_default()
                .push(key_id.into());
        }
    }
    missing
}
/// Concurrently asks every origin server for its own keys, returning the
/// keys that are still missing after all requests complete.
async fn acquire_origins<I>(batch: I) -> Batch
where
    I: Iterator<Item = (OwnedServerName, Vec<OwnedServerSigningKeyId>)> + Send,
{
    // All requests share a single 45-second deadline.
    let deadline = Instant::now()
        .checked_add(Duration::from_secs(45))
        .expect("timeout overflows");
    let mut requests: FuturesUnordered<_> = batch
        .map(|(origin, key_ids)| acquire_origin(origin, key_ids, deadline))
        .collect();
    let mut missing = Batch::new();
    while let Some((origin, still_missing)) = requests.next().await {
        if still_missing.is_empty() {
            continue;
        }
        missing.insert(origin, still_missing);
    }
    missing
}
/// Requests `origin`'s signing keys (bounded by `timeout`) and returns the
/// key IDs that the response did not satisfy.
async fn acquire_origin(
    origin: OwnedServerName,
    mut key_ids: Vec<OwnedServerSigningKeyId>,
    timeout: Instant,
) -> (OwnedServerName, Vec<OwnedServerSigningKeyId>) {
    let outcome = timeout_at(timeout, server_request(&origin)).await;
    match outcome {
        Ok(Ok(server_keys)) => {
            trace!(
                %origin,
                ?key_ids,
                ?server_keys,
                "received server_keys"
            );
            // Persist the keys, then drop the IDs this response satisfied.
            let _ = add_signing_keys(server_keys.clone());
            key_ids.retain(|key_id| !key_exists(&server_keys, key_id));
        }
        Ok(Err(e)) => tracing::error!(?origin, "{e}"),
        Err(e) => tracing::warn!(?origin, "timed out: {e}"),
    }
    (origin, key_ids)
}
/// Queries each configured trusted notary for the still-missing keys and
/// returns whatever remains missing after all notaries have been tried.
async fn acquire_notary<I>(batch: I) -> Batch
where
    I: Iterator<Item = (OwnedServerName, Vec<OwnedServerSigningKeyId>)> + Send,
{
    let mut missing: Batch = batch.collect();
    let conf = config::get();
    for notary in &conf.trusted_servers {
        let missing_keys = keys_count(&missing);
        let missing_servers = missing.len();
        debug!(
            "Asking notary {notary} for {missing_keys} missing keys from {missing_servers} servers"
        );
        // Re-borrow into the shape `batch_notary_request` expects.
        let batch = missing
            .iter()
            .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow)));
        match batch_notary_request(notary, batch).await {
            Err(e) => error!("failed to contact notary {notary:?}: {e}"),
            Ok(results) => {
                // Each returned key set may satisfy some of the missing IDs.
                for server_keys in results {
                    acquire_notary_result(&mut missing, server_keys).await;
                }
            }
        }
    }
    missing
}
/// Records a notary response and prunes `missing` of every key it satisfied.
///
/// The returned keys are persisted via `add_signing_keys`; the satisfied IDs
/// are then removed from the server's missing list, and the server entry
/// itself is dropped once that list is empty.
async fn acquire_notary_result(missing: &mut Batch, server_keys: ServerSigningKeys) {
    let server = &server_keys.server_name;
    let _ = add_signing_keys(server_keys.clone());
    if let Some(key_ids) = missing.get_mut(server) {
        // Keep only the keys the notary did NOT provide — mirrors
        // `acquire_origin`. The condition was previously inverted (missing
        // `!`), which kept satisfied keys and dropped the truly missing ones.
        key_ids.retain(|key_id| !key_exists(&server_keys, key_id));
        if key_ids.is_empty() {
            missing.remove(server);
        }
    }
}
/// Total number of key IDs across every server in `batch`.
fn keys_count(batch: &Batch) -> usize {
    batch.values().map(Vec::len).sum()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/server_key/request.rs | crates/server/src/server_key/request.rs | use std::{collections::BTreeMap, fmt::Debug};
use super::{GetUrlOrigin, minimum_valid_ts};
use crate::AppResult;
use crate::core::directory::QueryCriteria;
use crate::core::federation::directory::{
RemoteServerKeysBatchReqBody, RemoteServerKeysBatchResBody, RemoteServerKeysReqArgs,
RemoteServerKeysResBody, ServerKeysResBody, remote_server_keys_batch_request,
remote_server_keys_request, server_keys_request,
};
use crate::core::federation::discovery::ServerSigningKeys;
use crate::core::{
MatrixError, OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId,
};
type Batch = BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>;
/// Queries `notary` for the given per-server key IDs, splitting the request
/// into chunks of at most `trusted_server_batch_size` servers.
///
/// Returns the concatenated `ServerSigningKeys` from every chunk response.
pub(super) async fn batch_notary_request<'a, S, K>(
    notary: &ServerName,
    batch: S,
) -> AppResult<Vec<ServerSigningKeys>>
where
    S: Iterator<Item = (&'a ServerName, K)> + Send,
    K: Iterator<Item = &'a ServerSigningKeyId> + Send,
{
    // Every returned key must still be valid at our minimum timestamp.
    let criteria = QueryCriteria {
        minimum_valid_until_ts: Some(minimum_valid_ts()),
    };
    let mut server_keys = batch.fold(Batch::new(), |mut batch, (server, key_ids)| {
        batch
            .entry(server.into())
            .or_default()
            .extend(key_ids.map(|key_id| (key_id.into(), criteria.clone())));
        batch
    });
    debug_assert!(!server_keys.is_empty(), "empty batch request to notary");
    let mut results = Vec::new();
    // Chunking: `keys().rev().take(n).next_back()` yields the smallest key of
    // the last `n` entries, and `split_off` at that key then moves the whole
    // tail (up to `n` servers) out of `server_keys`, so the map shrinks on
    // every iteration until empty.
    while let Some(batch) = server_keys
        .keys()
        .rev()
        .take(crate::config::get().trusted_server_batch_size)
        .next_back()
        .cloned()
    {
        // NOTE(review): the request base is built from the batch server's
        // origin, while `notary_request` uses the notary's origin — confirm
        // which host this request is meant to target.
        let origin = batch.origin().await;
        let request = remote_server_keys_batch_request(
            &origin,
            RemoteServerKeysBatchReqBody {
                server_keys: server_keys.split_off(&batch),
            },
        )?
        .into_inner();
        debug!(
            ?notary,
            ?batch,
            remaining = %server_keys.len(),
            "notary request"
        );
        let response = crate::sending::send_federation_request(notary, request, None)
            .await?
            .json::<RemoteServerKeysBatchResBody>()
            .await?
            .server_keys;
        results.extend(response);
    }
    Ok(results)
}
/// Asks `notary` for every signing key it knows for `target`, requiring
/// validity through our minimum timestamp.
pub async fn notary_request(
    notary: &ServerName,
    target: &ServerName,
) -> AppResult<impl Iterator<Item = ServerSigningKeys> + Clone + Debug + Send> {
    let args = RemoteServerKeysReqArgs {
        server_name: target.into(),
        minimum_valid_until_ts: minimum_valid_ts(),
    };
    let origin = notary.origin().await;
    let request = remote_server_keys_request(&origin, args)?.into_inner();
    let response = crate::sending::send_federation_request(notary, request, None).await?;
    let body = response.json::<RemoteServerKeysResBody>().await?;
    Ok(body.server_keys.into_iter())
}
/// Fetches `target`'s own signing keys directly from the server itself.
///
/// Rejects responses whose claimed `server_name` does not match `target`.
pub async fn server_request(target: &ServerName) -> AppResult<ServerSigningKeys> {
    let origin = target.origin().await;
    let request = server_keys_request(&origin)?.into_inner();
    let response = crate::sending::send_federation_request(target, request, None).await?;
    let server_signing_key = response.json::<ServerKeysResBody>().await?.0;
    // A response claiming to be for a different server is spoofed or broken.
    if server_signing_key.server_name != target {
        tracing::warn!(
            requested = ?target,
            response = ?server_signing_key.server_name,
            "Server responded with bogus server_name"
        );
        return Err(MatrixError::unknown("Server responded with bogus server_name").into());
    }
    Ok(server_signing_key)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/db.rs | crates/server/src/config/db.rs | use serde::Deserialize;
use crate::core::serde::default_false;
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "db")]
#[derive(Clone, Debug, Deserialize)]
pub struct DbConfig {
    /// Connection URL for the primary database.
    /// By default this reads the `PALPO_DB_URL` environment variable.
    #[serde(default = "default_db_url")]
    pub url: String,
    /// Maximum number of pooled connections.
    #[serde(default = "default_db_pool_size")]
    pub pool_size: u32,
    /// Minimum number of idle connections to keep in the pool, if set.
    pub min_idle: Option<u32>,
    /// Number of seconds to wait for unacknowledged TCP packets before treating the connection as
    /// broken. This value will determine how long the service stays unavailable in case of full
    /// packet loss between the application and the database: setting it too high will result in an
    /// unnecessarily long outage (before the unhealthy database logic kicks in), while setting it
    /// too low might result in healthy connections being dropped.
    ///
    /// NOTE(review): the default (10_000) suggests this is actually in
    /// milliseconds despite "seconds" above — confirm units.
    #[serde(default = "default_tcp_timeout")]
    pub tcp_timeout: u64,
    /// Time to wait for a connection to become available from the connection
    /// pool before returning an error.
    #[serde(default = "default_connection_timeout")]
    pub connection_timeout: u64,
    /// Time to wait for a query response before canceling the query and
    /// returning an error.
    #[serde(default = "default_statement_timeout")]
    pub statement_timeout: u64,
    /// Number of threads to use for asynchronous operations such as connection
    /// creation.
    #[serde(default = "default_helper_threads")]
    pub helper_threads: usize,
    /// Whether to enforce that all the database connections are encrypted with TLS.
    #[serde(default = "default_false")]
    pub enforce_tls: bool,
}
impl DbConfig {
    /// Converts the server-level config into the `data` crate's equivalent,
    /// moving every field across unchanged.
    pub fn into_data_db_config(self) -> crate::data::DbConfig {
        let Self {
            url,
            pool_size,
            min_idle,
            tcp_timeout,
            connection_timeout,
            statement_timeout,
            helper_threads,
            enforce_tls,
        } = self;
        crate::data::DbConfig {
            // `url` is already owned here; the previous `url.clone()` was a
            // redundant allocation.
            url,
            pool_size,
            min_idle,
            tcp_timeout,
            connection_timeout,
            statement_timeout,
            helper_threads,
            enforce_tls,
        }
    }
}
impl Default for DbConfig {
fn default() -> Self {
Self {
url: default_db_url(),
pool_size: default_db_pool_size(),
min_idle: None,
tcp_timeout: default_tcp_timeout(),
connection_timeout: default_connection_timeout(),
statement_timeout: default_statement_timeout(),
helper_threads: default_helper_threads(),
enforce_tls: default_false(),
}
}
}
/// Primary database URL; falls back to the `PALPO_DB_URL` environment
/// variable, or an empty string when unset.
fn default_db_url() -> String {
    std::env::var("PALPO_DB_URL").unwrap_or_else(|_| String::new())
}
/// Default connection-pool size.
fn default_db_pool_size() -> u32 {
    10
}
/// Default TCP timeout (see `DbConfig::tcp_timeout` for units).
fn default_tcp_timeout() -> u64 {
    10_000
}
/// Default pool-checkout timeout.
fn default_connection_timeout() -> u64 {
    30_000
}
/// Default statement timeout.
fn default_statement_timeout() -> u64 {
    30_000
}
/// Default number of helper threads for async DB operations.
fn default_helper_threads() -> usize {
    10
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/http_client.rs | crates/server/src/config/http_client.rs | use serde::Deserialize;
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "client")]
#[derive(Clone, Debug, Deserialize)]
pub struct HttpClientConfig {
    /// Well-known resolution connection timeout.
    ///
    /// default: 6_000
    #[serde(default = "default_well_known_conn_timeout")]
    pub well_known_conn_timeout: u64,
    /// HTTP well-known resolution request timeout.
    ///
    /// default: 10_000
    #[serde(default = "default_well_known_timeout")]
    pub well_known_timeout: u64,
    /// Federation client request timeout. You most definitely want
    /// this to be high to account for extremely large room joins, slow
    /// homeservers, your own resources etc.
    ///
    /// NOTE(review): `default_federation_timeout()` returns 25_000, not the
    /// 300_000 previously documented here — confirm the intended default.
    ///
    /// default: 25_000
    #[serde(default = "default_federation_timeout")]
    pub federation_timeout: u64,
    /// Federation client request retry times.
    ///
    /// NOTE(review): `default_federation_retries()` returns 2, not the 3
    /// previously documented here.
    ///
    /// default: 2
    #[serde(default = "default_federation_retries")]
    pub federation_retries: u32,
    /// Federation client idle connection pool timeout.
    ///
    /// NOTE(review): `default_federation_idle_timeout()` returns 25_000, not
    /// the 25 previously documented here.
    ///
    /// default: 25_000
    #[serde(default = "default_federation_idle_timeout")]
    pub federation_idle_timeout: u64,
    /// Federation client max idle connections per host. Defaults to 1 as
    /// generally the same open connection can be re-used.
    ///
    /// default: 1
    #[serde(default = "default_federation_idle_per_host")]
    pub federation_idle_per_host: u16,
    // /// Federation sender request timeout. The time it takes for the
    // /// remote server to process sent transactions can take a while.
    // ///
    // /// default: 180_000
    // #[serde(default = "default_sender_timeout")]
    // pub sender_timeout: u64,
    // /// Federation sender idle connection pool timeout.
    // ///
    // /// default: 180_000
    // #[serde(default = "default_sender_idle_timeout")]
    // pub sender_idle_timeout: u64,
    // /// Federation sender transaction retry backoff limit.
    // ///
    // /// default: 86400_000
    // #[serde(default = "default_sender_retry_backoff_limit")]
    // pub sender_retry_backoff_limit: u64,
}
impl Default for HttpClientConfig {
fn default() -> Self {
Self {
well_known_conn_timeout: default_well_known_conn_timeout(),
well_known_timeout: default_well_known_timeout(),
federation_timeout: default_federation_timeout(),
federation_retries: default_federation_retries(),
federation_idle_timeout: default_federation_idle_timeout(),
federation_idle_per_host: default_federation_idle_per_host(),
// sender_timeout: default_sender_timeout(),
// sender_idle_timeout: default_sender_idle_timeout(),
// sender_retry_backoff_limit: default_sender_retry_backoff_limit(),
}
}
}
/// Default well-known connection timeout.
fn default_well_known_conn_timeout() -> u64 {
    6_000
}
/// Default well-known request timeout.
fn default_well_known_timeout() -> u64 {
    10_000
}
/// Default federation request timeout.
fn default_federation_timeout() -> u64 {
    25_000
}
/// Default federation retry count.
fn default_federation_retries() -> u32 {
    2
}
/// Default federation idle-connection timeout.
fn default_federation_idle_timeout() -> u64 {
    25_000
}
/// Default max idle federation connections per host.
fn default_federation_idle_per_host() -> u16 {
    1
}
// fn default_sender_timeout() -> u64 {
//     180_000
// }
// fn default_sender_idle_timeout() -> u64 {
//     180_000
// }
// fn default_sender_retry_backoff_limit() -> u64 {
//     86400
// }
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/jwt.rs | crates/server/src/config/jwt.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "jwt")]
#[derive(Clone, Debug, Default, Deserialize)]
pub struct JwtConfig {
    /// Enable JWT logins
    ///
    /// NOTE(review): documented as "default: false", but the serde attribute
    /// is `default_true`, so omitting the key currently ENABLES JWT logins —
    /// confirm which default is intended.
    ///
    /// default: false
    #[serde(default = "default_true")]
    pub enable: bool,
    /// Validation secret key. The type of key can be configured in 'format', but defaults to the common HMAC which
    /// is a plaintext shared-secret, so you should keep this value private.
    ///
    /// display: sensitive
    /// default:
    #[serde(default)]
    pub secret: String,
    /// Format of the 'key'. Only HMAC, ECDSA, and B64HMAC are supported
    /// Binary keys cannot be pasted into this config, so B64HMAC is an
    /// alternative to HMAC for properly random secret strings.
    /// - HMAC is a plaintext shared-secret private-key.
    /// - B64HMAC is a base64-encoded version of HMAC.
    /// - ECDSA is a PEM-encoded public-key.
    ///
    /// default: "HMAC"
    #[serde(default = "default_jwt_format")]
    pub format: String,
    /// Automatically create new user from a valid claim, otherwise access is
    /// denied for an unknown even with an authentic token.
    ///
    /// default: true
    #[serde(default = "crate::core::serde::default_true")]
    pub register_user: bool,
    /// JWT algorithm
    ///
    /// default: "HS256"
    #[serde(default = "default_jwt_algorithm")]
    pub algorithm: String,
    /// Optional audience claim list. The token must claim one or more values
    /// from this list when set.
    ///
    /// default: []
    #[serde(default)]
    pub audience: Vec<String>,
    /// Optional issuer claim list. The token must claim one or more values
    /// from this list when set.
    ///
    /// default: []
    #[serde(default)]
    pub issuer: Vec<String>,
    /// Require expiration claim in the token. This defaults to false for
    /// synapse migration compatibility.
    ///
    /// default: false
    #[serde(default)]
    pub require_exp: bool,
    /// Require not-before claim in the token. This defaults to false for
    /// synapse migration compatibility.
    ///
    /// default: false
    #[serde(default)]
    pub require_nbf: bool,
    /// Validate expiration time of the token when present. Whether or not it is
    /// required depends on require_exp, but when present this ensures the token
    /// is not used after a time.
    ///
    /// default: true
    #[serde(default = "crate::core::serde::default_true")]
    pub validate_exp: bool,
    /// Validate not-before time of the token when present. Whether or not it is
    /// required depends on require_nbf, but when present this ensures the token
    /// is not used before a time.
    ///
    /// default: true
    #[serde(default = "crate::core::serde::default_true")]
    pub validate_nbf: bool,
    /// Validate the token's signature. Setting this to `false` bypasses
    /// signature validation — for diagnostic/debug use only.
    ///
    /// default: true
    #[serde(default = "crate::core::serde::default_true")]
    pub validate_signature: bool,
}
/// Default JWT signing algorithm.
fn default_jwt_algorithm() -> String {
    String::from("HS256")
}
/// Default key format.
fn default_jwt_format() -> String {
    String::from("HMAC")
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/url_preview.rs | crates/server/src/config/url_preview.rs | use std::net::IpAddr;
use either::Either;
use serde::{Deserialize, Serialize};
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "url_preview")]
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct UrlPreviewConfig {
    /// Optional IP address or network interface-name to bind as the source of
    /// URL preview requests. If not set, it will not bind to a specific
    /// address or interface.
    ///
    /// Interface names only supported on Linux, Android, and Fuchsia platforms;
    /// all other platforms can specify the IP address. To list the interfaces
    /// on your system, use the command `ip link show`.
    ///
    /// example: `"eth0"` or `"1.2.3.4"`
    ///
    /// default:
    #[serde(default, with = "either::serde_untagged_optional")]
    pub bound_interface: Option<Either<IpAddr, String>>,
    /// Vector list of domains allowed to send requests to for URL previews.
    ///
    /// This is a *contains* match, not an explicit match. Putting "google.com"
    /// will match "https://google.com" and
    /// "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will
    /// allow all URL previews. Please note that this opens up significant
    /// attack surface to your server, you are expected to be aware of the risks
    /// by doing so.
    ///
    /// default: []
    #[serde(default)]
    pub domain_contains_allowlist: Vec<String>,
    /// Vector list of explicit domains allowed to send requests to for URL
    /// previews.
    ///
    /// This is an *explicit* match, not a contains match. Putting "google.com"
    /// will match "https://google.com", "http://google.com", but not
    /// "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will
    /// allow all URL previews. Please note that this opens up significant
    /// attack surface to your server, you are expected to be aware of the risks
    /// by doing so.
    ///
    /// default: []
    #[serde(default)]
    pub domain_explicit_allowlist: Vec<String>,
    /// Vector list of explicit domains not allowed to send requests to for URL
    /// previews.
    ///
    /// This is an *explicit* match, not a contains match. Putting "google.com"
    /// will match "https://google.com", "http://google.com", but not
    /// "https://mymaliciousdomainexamplegoogle.com". The denylist is checked
    /// first before allowlist. Setting this to "*" will not do anything.
    ///
    /// default: []
    #[serde(default)]
    pub domain_explicit_denylist: Vec<String>,
    /// Vector list of URLs allowed to send requests to for URL previews.
    ///
    /// Note that this is a *contains* match, not an explicit match. Putting
    /// "google.com" will match "https://google.com/",
    /// "https://google.com/url?q=https://mymaliciousdomainexample.com", and
    /// "https://mymaliciousdomainexample.com/hi/google.com" Setting this to "*"
    /// will allow all URL previews. Please note that this opens up significant
    /// attack surface to your server, you are expected to be aware of the risks
    /// by doing so.
    ///
    /// default: []
    #[serde(default)]
    pub url_contains_allowlist: Vec<String>,
    /// Maximum amount of bytes allowed in a URL preview body size when
    /// spidering. Defaults to 256KB in bytes.
    ///
    /// default: 256000
    #[serde(default = "default_max_spider_size")]
    pub max_spider_size: usize,
    /// Option to decide whether you would like to run the domain allowlist
    /// checks (contains and explicit) on the root domain or not. Does not apply
    /// to URL contains allowlist. Defaults to false.
    ///
    /// Example use case: If this is enabled and you have "wikipedia.org" allowed
    /// in the explicit and/or contains domain allowlist, it will allow all
    /// subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the
    /// root domain is checked and matched. Useful if the domain contains
    /// allowlist is still too broad for you but you still want to allow all the
    /// subdomains under a root domain.
    #[serde(default)]
    pub check_root_domain: bool,
}
impl UrlPreviewConfig {
    /// Warn the operator about allowlist settings containing the "*" wildcard,
    /// which disables URL-preview restrictions entirely. Logging only; the
    /// config values are not modified.
    pub fn check(&self) {
        // `iter().any(..)` compares `&str` to `&str` directly, avoiding the
        // per-call `String` allocation of `contains(&"*".to_owned())`.
        if self.domain_contains_allowlist.iter().any(|p| p == "*") {
            warn!(
                "All URLs are allowed for URL previews via setting \
                \"url_preview.domain_contains_allowlist\" to \"*\". This opens up significant \
                attack surface to your server. You are expected to be aware of the risks by doing \
                this."
            );
        }
        if self.domain_explicit_allowlist.iter().any(|p| p == "*") {
            warn!(
                "All URLs are allowed for URL previews via setting \
                \"url_preview.domain_explicit_allowlist\" to \"*\". This opens up significant \
                attack surface to your server. You are expected to be aware of the risks by doing \
                this."
            );
        }
        if self.url_contains_allowlist.iter().any(|p| p == "*") {
            warn!(
                "All URLs are allowed for URL previews via setting \
                \"url_preview.url_contains_allowlist\" to \"*\". This opens up significant attack \
                surface to your server. You are expected to be aware of the risks by doing this."
            );
        }
    }
}
/// Default cap on a URL-preview body while spidering: 256 KB.
fn default_max_spider_size() -> usize {
    256_000
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/presence.rs | crates/server/src/config/presence.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Presence (online/idle/offline status) configuration.
#[config_example(filename = "palpo-example.toml", section = "presence")]
#[derive(Clone, Debug, Deserialize)]
pub struct PresenceConfig {
    /// Allow local (your server only) presence updates/requests.
    ///
    /// Note that presence on palpo is very fast unlike Synapse's. If using
    /// outgoing presence, this MUST be enabled.
    #[serde(default = "default_true")]
    pub allow_local: bool,
    /// Allow incoming federated presence updates/requests.
    ///
    /// This option receives presence updates from other servers, but does not
    /// send any unless `allow_outgoing_presence` is true. Note that presence on
    /// palpo is very fast unlike Synapse's.
    #[serde(default = "default_true")]
    pub allow_incoming: bool,
    /// Allow outgoing presence updates/requests.
    ///
    /// This option sends presence updates to other servers, but does not
    /// receive any unless `allow_incoming_presence` is true. Note that presence
    /// on palpo is very fast unlike Synapse's. If using outgoing presence,
    /// you MUST enable `allow_local_presence` as well.
    #[serde(default = "default_true")]
    pub allow_outgoing: bool,
    /// How many milliseconds without presence updates before you become idle.
    /// Defaults to 5 minutes.
    ///
    /// default: 300_000
    #[serde(default = "default_presence_idle_timeout")]
    pub idle_timeout: u64,
    /// How many milliseconds without presence updates before you become
    /// offline. Defaults to 30 minutes.
    ///
    /// default: 1800_000
    #[serde(default = "default_presence_offline_timeout")]
    pub offline_timeout: u64,
    /// Enable the presence idle timer for remote users.
    ///
    /// Disabling is offered as an optimization for servers participating in
    /// many large rooms or when resources are limited. Disabling it may cause
    /// incorrect presence states (i.e. stuck online) to be seen for some remote
    /// users.
    #[serde(default = "default_true")]
    pub timeout_remote_users: bool,
}
impl Default for PresenceConfig {
    /// Mirrors the per-field serde defaults: everything enabled, with the
    /// standard idle/offline timeouts.
    fn default() -> Self {
        Self {
            idle_timeout: default_presence_idle_timeout(),
            offline_timeout: default_presence_offline_timeout(),
            allow_local: true,
            allow_incoming: true,
            allow_outgoing: true,
            timeout_remote_users: true,
        }
    }
}
/// Default offline timeout: 30 minutes, expressed in milliseconds.
fn default_presence_offline_timeout() -> u64 {
    1_800_000
}
/// Default idle timeout: 5 minutes, expressed in milliseconds.
fn default_presence_idle_timeout() -> u64 {
    300_000
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/read_receipt.rs | crates/server/src/config/read_receipt.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Federated read-receipt configuration.
#[config_example(filename = "palpo-example.toml", section = "read_receipt")]
#[derive(Clone, Debug, Deserialize)]
pub struct ReadReceiptConfig {
    /// Allow receiving incoming read receipts from remote servers.
    #[serde(default = "default_true")]
    pub allow_incoming: bool,
    /// Allow sending read receipts to remote servers.
    #[serde(default = "default_true")]
    pub allow_outgoing: bool,
}
impl Default for ReadReceiptConfig {
    /// Read receipts federate in both directions by default.
    fn default() -> Self {
        Self {
            allow_outgoing: true,
            allow_incoming: true,
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/logger.rs | crates/server/src/config/logger.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Tracing/logging output configuration.
#[config_example(filename = "palpo-example.toml", section = "logger")]
#[derive(Clone, Debug, Deserialize)]
pub struct LoggerConfig {
    /// Max log level for palpo. Allows debug, info, warn, or error.
    ///
    /// See also:
    /// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
    ///
    /// **Caveat**:
    /// For release builds, the tracing crate is configured to only implement
    /// levels higher than error to avoid unnecessary overhead in the compiled
    /// binary from trace macros. For debug builds, this restriction is not
    /// applied.
    ///
    /// default: "info"
    #[serde(default = "default_level")]
    pub level: String,
    // pretty, compact, json
    #[serde(default = "default_format")]
    pub format: String,
    /// Output logs with ANSI colours.
    #[serde(default = "default_true")]
    pub ansi_colors: bool,
    /// Configures the span events which will be outputted with the log.
    ///
    /// default: "none"
    #[serde(default = "default_span_events")]
    pub span_events: String,
    /// Configures whether EnvFilter matches values using regular expressions.
    /// See the tracing_subscriber documentation on Directives.
    ///
    /// default: true
    #[serde(default = "default_true")]
    pub filter_regex: bool,
    /// Toggles the display of ThreadId in tracing log output.
    ///
    /// default: false
    #[serde(default)]
    pub thread_ids: bool,
    /// Set to true to log guest registrations in the admin room. Note that
    /// these may be noisy or unnecessary if you're a public homeserver.
    #[serde(default)]
    pub guest_registrations: bool,
}
impl Default for LoggerConfig {
    /// Matches the per-field serde defaults used during deserialization.
    fn default() -> Self {
        Self {
            level: default_level(),
            format: default_format(),
            span_events: default_span_events(),
            ansi_colors: true,
            filter_regex: true,
            thread_ids: false,
            guest_registrations: false,
        }
    }
}
/// Default log level: "debug" for debug builds, "info" for release builds.
#[must_use]
pub fn default_level() -> String {
    if cfg!(debug_assertions) {
        "debug".to_owned()
    } else {
        "info".to_owned()
    }
}
/// do compact logging by default
#[must_use]
pub fn default_format() -> String {
"pretty".to_owned()
}
/// By default no span lifecycle events are included in log output.
#[must_use]
pub fn default_span_events() -> String {
    String::from("none")
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/blurhash.rs | crates/server/src/config/blurhash.rs | use serde::Deserialize;
use crate::macros::config_example;
/// Blurhash (blurred image placeholder) generation configuration.
#[config_example(filename = "palpo-example.toml", section = "blurhash")]
#[derive(Clone, Debug, Deserialize)]
pub struct BlurhashConfig {
    /// blurhash x component, 4 is recommended by https://blurha.sh/
    ///
    /// default: 4
    #[serde(default = "default_components_x")]
    pub components_x: u32,
    /// blurhash y component, 3 is recommended by https://blurha.sh/
    ///
    /// default: 3
    #[serde(default = "default_components_y")]
    pub components_y: u32,
    /// Max raw size that the server will blurhash, this is the size of the
    /// image after converting it to raw data, it should be higher than the
    /// upload limit but not too high. The higher it is the higher the
    /// potential load will be for clients requesting blurhashes. The default
    /// is 33.55MB. Setting it to 0 disables blurhash.
    ///
    /// default: 33554432
    #[serde(default = "default_blurhash_max_raw_size")]
    pub max_raw_size: u64,
}
impl Default for BlurhashConfig {
    /// Component counts recommended by https://blurha.sh/ plus the 2^25-byte
    /// raw-size cap.
    fn default() -> Self {
        Self {
            max_raw_size: default_blurhash_max_raw_size(),
            components_x: default_components_x(),
            components_y: default_components_y(),
        }
    }
}
// blurhash defaults recommended by https://blurha.sh/
/// Default maximum raw image size to blurhash: 2^25 bytes (33_554_432).
fn default_blurhash_max_raw_size() -> u64 {
    1 << 25
}
/// Default blurhash x component count (per https://blurha.sh/).
fn default_components_x() -> u32 {
    4
}
/// Default blurhash y component count (per https://blurha.sh/).
fn default_components_y() -> u32 {
    3
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/acme.rs | crates/server/src/config/acme.rs | use std::collections::BTreeMap;
use std::fmt;
use std::net::IpAddr;
use std::path::PathBuf;
use serde::Deserialize;
use crate::core::serde::{default_false, default_true};
use crate::macros::config_example;
/// ACME (automatic TLS certificate) configuration.
///
/// Placeholder: no options are defined here yet.
#[config_example(filename = "palpo-example.toml", section = "acme")]
#[derive(Clone, Debug, Deserialize, Default)]
pub struct AcmeConfig {
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/appservice.rs | crates/server/src/config/appservice.rs | use std::collections::BTreeMap;
use std::fmt;
use std::net::IpAddr;
use std::path::PathBuf;
use serde::Deserialize;
use crate::core::serde::{default_false, default_true};
use crate::macros::config_example;
/// Application-service (bridge/bot registration) configuration.
///
/// Placeholder: no options are defined here yet.
#[config_example(filename = "palpo-example.toml", section = "appservice")]
#[derive(Clone, Debug, Deserialize, Default)]
pub struct AppServiceConfig {
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/media.rs | crates/server/src/config/media.rs | use regex::RegexSet;
use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Media repository (uploads, downloads, thumbnails) configuration.
#[config_example(filename = "palpo-example.toml", section = "media")]
#[derive(Clone, Debug, Deserialize)]
pub struct MediaConfig {
    /// Enable the legacy unauthenticated Matrix media repository endpoints.
    /// These endpoints consist of:
    /// - /_matrix/media/*/config
    /// - /_matrix/media/*/upload
    /// - /_matrix/media/*/preview_url
    /// - /_matrix/media/*/download/*
    /// - /_matrix/media/*/thumbnail/*
    ///
    /// The authenticated equivalent endpoints are always enabled.
    ///
    /// Defaults to true for now, but this is highly subject to change, likely
    /// in the next release.
    #[serde(default = "default_true")]
    pub allow_legacy: bool,
    // NOTE(review): undocumented — presumably freezes (refuses new writes on)
    // the legacy media endpoints while `allow_legacy` is set; confirm against
    // the media route handlers.
    #[serde(default = "default_true")]
    pub freeze_legacy: bool,
    /// Check consistency of the media directory at startup:
    /// 1. When `media_compat_file_link` is enabled, this check will upgrade
    ///    media when switching back and forth between Conduit and palpo.
    ///    Both options must be enabled to handle this.
    /// 2. When media is deleted from the directory, this check will also delete
    ///    its database entry.
    ///
    /// If none of these checks apply to your use cases, and your media
    /// directory is significantly large setting this to false may reduce
    /// startup time.
    #[serde(default = "default_true")]
    pub startup_check: bool,
    /// Enable backward-compatibility with Conduit's media directory by creating
    /// symlinks of media.
    ///
    /// This option is only necessary if you plan on using Conduit again.
    /// Otherwise setting this to false reduces filesystem clutter and overhead
    /// for managing these symlinks in the directory. This is now disabled by
    /// default. You may still return to upstream Conduit but you have to run
    /// palpo at least once with this set to true and allow the
    /// media_startup_check to take place before shutting down to return to
    /// Conduit.
    #[serde(default)]
    pub compat_file_link: bool,
    /// Prune missing media from the database as part of the media startup
    /// checks.
    ///
    /// This means if you delete files from the media directory the
    /// corresponding entries will be removed from the database. This is
    /// disabled by default because if the media directory is accidentally moved
    /// or inaccessible, the metadata entries in the database will be lost with
    /// sadness.
    #[serde(default)]
    pub prune_missing: bool,
    /// Vector list of regex patterns of server names that palpo will refuse
    /// to download remote media from.
    ///
    /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
    ///
    /// default: []
    #[serde(default, with = "serde_regex")]
    pub prevent_downloads_from: RegexSet,
}
impl Default for MediaConfig {
fn default() -> Self {
Self {
allow_legacy: true,
freeze_legacy: true,
startup_check: true,
compat_file_link: false,
prune_missing: false,
prevent_downloads_from: Default::default(),
}
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/dns.rs | crates/server/src/config/dns.rs | use serde::Deserialize;
use crate::core::serde::{default_false, default_true};
use crate::macros::config_example;
/// DNS resolver (federation name lookup) configuration.
#[config_example(filename = "palpo-example.toml", section = "dns")]
#[derive(Clone, Debug, Deserialize)]
pub struct DnsConfig {
    /// Maximum entries stored in DNS memory-cache. The size of an entry may
    /// vary so please take care if raising this value excessively. Only
    /// decrease this when using an external DNS cache. Please note that
    /// systemd-resolved does *not* count as an external cache, even when
    /// configured to do so.
    ///
    /// default: 32768
    #[serde(default = "default_cache_entries")]
    pub cache_entries: u32,
    /// Minimum time-to-live in seconds for entries in the DNS cache. The
    /// default may appear high to most administrators; this is by design as the
    /// majority of NXDOMAINs are correct for a long time (e.g. the server is no
    /// longer running Matrix). Only decrease this if you are using an external
    /// DNS cache.
    ///
    /// default: 10800
    #[serde(default = "default_min_ttl")]
    pub min_ttl: u64,
    /// Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache.
    /// This value is critical for the server to federate efficiently.
    /// NXDOMAIN's are assumed to not be returning to the federation and
    /// aggressively cached rather than constantly rechecked.
    ///
    /// Defaults to 3 days as these are *very rarely* false negatives.
    ///
    /// default: 259200
    #[serde(default = "default_min_ttl_nxdomain")]
    pub min_ttl_nxdomain: u64,
    /// Number of DNS nameserver retries after a timeout or error.
    ///
    /// default: 10
    #[serde(default = "default_attempts")]
    pub attempts: u16,
    /// The number of seconds to wait for a reply to a DNS query. Please note
    /// that recursive queries can take up to several seconds for some domains,
    /// so this value should not be too low, especially on slower hardware or
    /// resolvers.
    ///
    /// default: 10
    #[serde(default = "default_timeout")]
    pub timeout: u64,
    /// Fallback to TCP on DNS errors. Set this to false if unsupported by
    /// nameserver.
    #[serde(default = "default_true")]
    pub tcp_fallback: bool,
}
impl Default for DnsConfig {
    /// Matches the per-field serde defaults.
    fn default() -> Self {
        Self {
            tcp_fallback: true,
            cache_entries: default_cache_entries(),
            min_ttl: default_min_ttl(),
            min_ttl_nxdomain: default_min_ttl_nxdomain(),
            attempts: default_attempts(),
            timeout: default_timeout(),
        }
    }
}
/// Default DNS cache capacity (2^15 entries).
fn default_cache_entries() -> u32 {
    32_768
}
/// Default minimum DNS cache TTL: 3 hours, in seconds.
fn default_min_ttl() -> u64 {
    3 * 60 * 60
}
/// Default minimum TTL for cached NXDOMAIN responses: 3 days, in seconds,
/// matching the documented default of 259200 on `DnsConfig::min_ttl_nxdomain`.
fn default_min_ttl_nxdomain() -> u64 {
    // Was `60 * 60 * 24 * 3_000` (= 259_200_000), which contradicted both the
    // "Defaults to 3 days" and "default: 259200" claims in the field docs —
    // as seconds that value is roughly 8 years.
    60 * 60 * 24 * 3
}
/// Default number of retries per nameserver query.
fn default_attempts() -> u16 {
    10
}
/// Default DNS query timeout.
///
/// NOTE(review): the field doc on `DnsConfig::timeout` says "number of
/// seconds" with "default: 10", but this returns 10_000 — presumably the
/// value is consumed as milliseconds. Confirm the unit expected by the
/// resolver and reconcile the field documentation.
fn default_timeout() -> u64 {
    10_000
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/ldap.rs | crates/server/src/config/ldap.rs | use std::path::PathBuf;
use serde::Deserialize;
use url::Url;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// LDAP authentication backend configuration.
///
/// NOTE(review): `#[derive(Default)]` yields `enable: false`, while the serde
/// attribute defaults `enable` to true when the `[ldap]` section is present
/// but the key is omitted — confirm this asymmetry is intentional.
#[config_example(filename = "palpo-example.toml", section = "ldap")]
#[derive(Clone, Debug, Default, Deserialize)]
pub struct LdapConfig {
    /// Whether to enable LDAP login.
    ///
    /// example: "true"
    #[serde(default = "default_true")]
    pub enable: bool,
    /// URI of the LDAP server.
    ///
    /// example: "ldap://ldap.example.com:389"
    pub uri: Option<Url>,
    /// Root of the searches.
    ///
    /// example: "ou=users,dc=example,dc=org"
    #[serde(default)]
    pub base_dn: String,
    /// Bind DN if anonymous search is not enabled.
    ///
    /// You can use the variable `{username}` that will be replaced by the
    /// entered username. In such case, the password used to bind will be the
    /// one provided for the login and not the one given by
    /// `bind_password_file`. Beware: automatically granting admin rights will
    /// not work if you use this direct bind instead of a LDAP search.
    ///
    /// example: "cn=ldap-reader,dc=example,dc=org" or
    /// "cn={username},ou=users,dc=example,dc=org"
    #[serde(default)]
    pub bind_dn: Option<String>,
    /// Path to a file on the system that contains the password for the
    /// `bind_dn`.
    ///
    /// The server must be able to access the file, and it must not be empty.
    #[serde(default)]
    pub bind_password_file: Option<PathBuf>,
    /// Search filter to limit user searches.
    ///
    /// You can use the variable `{username}` that will be replaced by the
    /// entered username for more complex filters.
    ///
    /// example: "(&(objectClass=person)(memberOf=matrix))"
    ///
    /// default: "(objectClass=*)"
    #[serde(default = "default_ldap_search_filter")]
    pub filter: String,
    /// Attribute to use to uniquely identify the user.
    ///
    /// example: "uid" or "cn"
    ///
    /// default: "uid"
    #[serde(default = "default_ldap_uid_attribute")]
    pub uid_attribute: String,
    /// Attribute containing the mail of the user.
    ///
    /// example: "mail"
    ///
    /// default: "mail"
    #[serde(default = "default_ldap_mail_attribute")]
    pub mail_attribute: String,
    /// Attribute containing the distinguished name of the user.
    ///
    /// example: "givenName" or "sn"
    ///
    /// default: "givenName"
    #[serde(default = "default_ldap_name_attribute")]
    pub name_attribute: String,
    /// Root of the searches for admin users.
    ///
    /// Defaults to `base_dn` if empty.
    ///
    /// example: "ou=admins,dc=example,dc=org"
    #[serde(default)]
    pub admin_base_dn: String,
    /// The LDAP search filter to find administrative users for palpo.
    ///
    /// If left blank, administrative state must be configured manually for each
    /// user.
    ///
    /// You can use the variable `{username}` that will be replaced by the
    /// entered username for more complex filters.
    ///
    /// example: "(objectClass=palpoAdmin)" or "(uid={username})"
    #[serde(default)]
    pub admin_filter: String,
}
/// Default LDAP user-search filter: match any object class.
fn default_ldap_search_filter() -> String {
    String::from("(objectClass=*)")
}
/// Default attribute used to uniquely identify an LDAP user.
fn default_ldap_uid_attribute() -> String {
    "uid".to_owned()
}
/// Default attribute holding the LDAP user's mail address.
fn default_ldap_mail_attribute() -> String {
    "mail".to_owned()
}
/// Default attribute holding the LDAP user's display name.
fn default_ldap_name_attribute() -> String {
    "givenName".to_owned()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/federation.rs | crates/server/src/config/federation.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Matrix federation configuration.
#[config_example(filename = "palpo-example.toml", section = "federation")]
#[derive(Clone, Debug, Deserialize)]
pub struct FederationConfig {
    /// Controls whether federation is allowed or not. It is not recommended to
    /// disable this after the fact due to potential federation breakage.
    #[serde(default = "default_true")]
    pub enable: bool,
    /// Allows federation requests to be made to itself
    ///
    /// This isn't intended and is very likely a bug if federation requests are
    /// being sent to yourself. This currently mainly exists for development
    /// purposes.
    #[serde(default)]
    pub allow_loopback: bool,
    /// Set this to true to allow federating device display names / allow
    /// external users to see your device display name. If federation is
    /// disabled entirely (`allow_federation`), this is inherently false. For
    /// privacy reasons, this is best left disabled.
    #[serde(default)]
    pub allow_device_name: bool,
    /// Config option to allow or disallow incoming federation requests that
    /// obtain the profiles of our local users from
    /// `/_matrix/federation/v1/query/profile`
    ///
    /// Increases privacy of your local user's such as display names, but some
    /// remote users may get a false "this user does not exist" error when they
    /// try to invite you to a DM or room. Also can protect against profile
    /// spiders.
    ///
    /// This is inherently false if `allow_federation` is disabled
    #[serde(default = "default_true")]
    pub allow_inbound_profile_lookup: bool,
}
impl Default for FederationConfig {
    /// Federation enabled with inbound profile lookup; loopback requests and
    /// device-name sharing disabled.
    fn default() -> Self {
        Self {
            enable: true,
            allow_inbound_profile_lookup: true,
            allow_loopback: false,
            allow_device_name: false,
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/admin.rs | crates/server/src/config/admin.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Admin room / admin command-line configuration.
#[config_example(filename = "palpo-example.toml", section = "admin")]
#[derive(Clone, Debug, Deserialize)]
pub struct AdminConfig {
    /// Controls whether admin room notices like account registrations, password
    /// changes, account deactivations, room directory publications, etc will be
    /// sent to the admin room. Update notices and normal admin command
    /// responses will still be sent.
    #[serde(default = "default_true")]
    pub room_notices: bool,
    /// Allow admins to enter commands in rooms other than "#admins" (admin
    /// room) by prefixing your message with "\!admin" or "\\!admin" followed up
    /// a normal palpo admin command. The reply will be publicly visible to
    /// the room, originating from the sender.
    ///
    /// example: \\!admin debug ping example.com
    #[serde(default = "default_true")]
    pub escape_commands: bool,
    /// Automatically activate the palpo admin room console / CLI on
    /// startup. This option can also be enabled with `--console` palpo
    /// argument.
    #[serde(default)]
    pub console_automatic: bool,
    #[allow(clippy::doc_link_with_quotes)]
    /// List of admin commands to execute on startup.
    ///
    /// This option can also be configured with the `--execute` palpo
    /// argument and can take standard shell commands and environment variables
    ///
    /// For example: `./palpo --execute "server admin-notice palpo has
    /// started up at $(date)"`
    ///
    /// example: admin_execute = ["debug ping example.com", "debug echo hi"]`
    ///
    /// default: []
    #[serde(default)]
    pub startup_execute: Vec<String>,
    /// Ignore errors in startup commands.
    ///
    /// If false, palpo will error and fail to start if an admin execute
    /// command (`--execute` / `admin_execute`) fails.
    #[serde(default)]
    pub execute_errors_ignore: bool,
    /// List of admin commands to execute on SIGUSR2.
    ///
    /// Similar to admin_execute, but these commands are executed when the
    /// server receives SIGUSR2 on supporting platforms.
    ///
    /// default: []
    #[serde(default)]
    pub signal_execute: Vec<String>,
    /// Controls the max log level for admin command log captures (logs
    /// generated from running admin commands). Defaults to "info" on release
    /// builds, else "debug" on debug builds.
    ///
    /// default: "info"
    #[serde(default = "default_log_capture")]
    pub log_capture: String,
    /// The default room tag to apply on the admin room.
    ///
    /// On some clients like Element, the room tag "m.server_notice" is a
    /// special pinned room at the very bottom of your room list. The palpo
    /// admin room can be pinned here so you always have an easy-to-access
    /// shortcut dedicated to your admin room.
    ///
    /// default: "m.server_notice"
    #[serde(default = "default_room_tag")]
    pub room_tag: String,
}
impl Default for AdminConfig {
    /// Matches the per-field serde defaults.
    fn default() -> Self {
        Self {
            log_capture: default_log_capture(),
            room_tag: default_room_tag(),
            startup_execute: Vec::new(),
            signal_execute: Vec::new(),
            room_notices: true,
            escape_commands: true,
            console_automatic: false,
            execute_errors_ignore: false,
        }
    }
}
/// Default admin-command log-capture level: "debug" for debug builds,
/// "info" for release builds.
fn default_log_capture() -> String {
    if cfg!(debug_assertions) {
        "debug".to_owned()
    } else {
        "info".to_owned()
    }
}
/// Default tag applied to the admin room (pinned as a server-notice room by
/// clients such as Element).
fn default_room_tag() -> String {
    String::from("m.server_notice")
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/oidc.rs | crates/server/src/config/oidc.rs | use std::collections::BTreeMap;
use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// OIDC/OAuth single sign-on configuration.
#[config_example(filename = "palpo-example.toml", section = "oidc")]
#[derive(Clone, Debug, Deserialize, Default)]
pub struct OidcConfig {
    /// Enable OIDC/OAuth authentication
    ///
    /// Allows users to sign in using external providers (Google, GitHub, etc.)
    /// instead of Matrix passwords
    ///
    /// default: false
    #[serde(default)]
    pub enable: bool,
    /// Provider configurations
    ///
    /// Map of provider name to configuration. Each provider needs:
    /// - issuer: Provider base URL
    /// - client_id: OAuth app ID
    /// - client_secret: OAuth app secret
    /// - redirect_uri: Callback URL (must match provider settings)
    /// - scopes (optional): Permissions to request
    /// - display_name (optional): UI display text
    ///
    /// GitHub example:
    /// ```toml
    /// [oidc.providers.github]
    /// issuer = "https://github.com"
    /// client_id = "your_app_id"
    /// client_secret = "your_secret"
    /// redirect_uri = "https://server/_matrix/client/oidc/callback"
    /// scopes = ["read:user", "user:email"]
    /// ```
    ///
    /// default: {}
    #[serde(default)]
    pub providers: BTreeMap<String, OidcProviderConfig>,
    /// Default provider name
    ///
    /// Used when accessing /oidc/auth without specifying provider
    ///
    /// example: "github"
    /// default: None (first alphabetically)
    pub default_provider: Option<String>,
    /// Auto-create new users on first login
    ///
    /// When true: New accounts created automatically
    /// When false: Only existing Matrix users can use OAuth login
    ///
    /// default: true
    #[serde(default = "default_true")]
    pub allow_registration: bool,
    /// User ID generation strategy (deprecated - auto-detected now)
    ///
    /// The system now automatically chooses the best identifier:
    /// 1. Username (GitHub login, preferred_username)
    /// 2. Email prefix (john from john@example.com)
    /// 3. Provider ID with "user" prefix
    ///
    /// This field is kept for backwards compatibility
    ///
    /// default: "email"
    #[serde(default = "default_user_mapping")]
    pub user_mapping: String,
    /// Prefix for OAuth user IDs
    ///
    /// Adds prefix to distinguish OAuth users from regular Matrix users
    /// Empty string for cleaner usernames
    ///
    /// example: "gh_" → @gh_username:server
    /// default: ""
    #[serde(default)]
    pub user_prefix: String,
    /// Require verified email for login
    ///
    /// Set to false for GitHub users with private emails
    /// Set to true for providers where email verification is critical
    ///
    /// default: true
    #[serde(default = "default_true")]
    pub require_email_verified: bool,
    /// OAuth session timeout (seconds)
    ///
    /// Time limit for completing the OAuth flow
    ///
    /// default: 600 (10 minutes)
    #[serde(default = "default_session_timeout")]
    pub session_timeout: u64,
    /// Enable PKCE security extension
    ///
    /// Adds extra security to OAuth flow (recommended)
    ///
    /// default: true
    #[serde(default = "default_true")]
    pub enable_pkce: bool,
}
/// Per-provider OIDC/OAuth settings (one entry under `[oidc.providers.*]`).
#[derive(Clone, Debug, Deserialize)]
pub struct OidcProviderConfig {
    /// Provider base URL
    ///
    /// OAuth provider's base URL (e.g., "https://github.com")
    pub issuer: String,
    /// OAuth app client ID
    ///
    /// Get this from your OAuth app settings
    pub client_id: String,
    /// OAuth app client secret
    ///
    /// Keep this secure - never commit to version control
    pub client_secret: String,
    /// Callback URL after authentication
    ///
    /// Must exactly match the URL in your OAuth app settings
    /// Format: "https://your-server/_matrix/client/oidc/callback"
    pub redirect_uri: String,
    /// Permissions to request from provider
    ///
    /// GitHub: ["read:user", "user:email"]
    /// Google: ["openid", "email", "profile"] (default)
    #[serde(default = "default_scopes")]
    pub scopes: Vec<String>,
    /// Extra OAuth parameters (optional)
    ///
    /// Provider-specific parameters
    /// example: { "prompt" = "select_account" }
    ///
    /// default: {}
    #[serde(default)]
    pub additional_params: BTreeMap<String, String>,
    /// Skip TLS verification (DEV ONLY - INSECURE)
    ///
    /// default: false
    #[serde(default)]
    pub skip_tls_verify: bool,
    /// UI display text for this provider
    ///
    /// example: "Sign in with GitHub"
    /// default: Provider name
    pub display_name: Option<String>,
    /// Custom attribute mapping
    ///
    /// Override the default mapping of OIDC claims to Matrix user attributes.
    /// Keys are Matrix attributes, values are OIDC claim names.
    ///
    /// example: { "display_name" = "given_name", "avatar_url" = "picture" }
    ///
    /// default: {}
    #[serde(default)]
    pub attribute_mapping: BTreeMap<String, String>,
}
/// Default (legacy) user-ID mapping strategy; kept for backwards
/// compatibility with older configs.
fn default_user_mapping() -> String {
    String::from("email")
}
/// Default OAuth flow completion window: 10 minutes, in seconds.
fn default_session_timeout() -> u64 {
    600
}
/// Default OIDC scopes: the standard OpenID Connect profile set.
fn default_scopes() -> Vec<String> {
    ["openid", "email", "profile"]
        .iter()
        .map(|s| s.to_string())
        .collect()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/typing.rs | crates/server/src/config/typing.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
/// Typing-notification (federated and local) configuration.
#[config_example(filename = "palpo-example.toml", section = "typing")]
#[derive(Clone, Debug, Deserialize)]
pub struct TypingConfig {
    /// Allow incoming typing updates from federation.
    #[serde(default = "default_true")]
    pub allow_incoming: bool,
    /// Allow outgoing typing updates to federation.
    #[serde(default = "default_true")]
    pub allow_outgoing: bool,
    /// Maximum time federation user can indicate typing.
    ///
    /// default: 30_000
    #[serde(default = "default_federation_timeout")]
    pub federation_timeout: u64,
    /// Minimum time local client can indicate typing. This does not override a
    /// client's request to stop typing. It only enforces a minimum value in
    /// case of no stop request.
    ///
    /// default: 15_000
    #[serde(default = "default_client_timeout_min")]
    pub client_timeout_min: u64,
    /// Maximum time local client can indicate typing.
    ///
    /// default: 45_000
    #[serde(default = "default_client_timeout_max")]
    pub client_timeout_max: u64,
}
impl Default for TypingConfig {
    /// Matches the per-field serde defaults.
    fn default() -> Self {
        Self {
            federation_timeout: default_federation_timeout(),
            client_timeout_min: default_client_timeout_min(),
            client_timeout_max: default_client_timeout_max(),
            allow_incoming: true,
            allow_outgoing: true,
        }
    }
}
/// Default cap on how long a federated user may indicate typing (30_000).
fn default_federation_timeout() -> u64 {
    30_000
}
/// Default minimum typing indication for local clients (15_000).
fn default_client_timeout_min() -> u64 {
    15_000
}
/// Default maximum typing indication for local clients (45_000).
fn default_client_timeout_max() -> u64 {
    45_000
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/server.rs | crates/server/src/config/server.rs | use std::collections::BTreeMap;
use std::path::PathBuf;
use regex::RegexSet;
use salvo::http::HeaderValue;
use serde::Deserialize;
use serde::de::IgnoredAny;
use url::Url;
use super::{
AdminConfig, BlurhashConfig, CompressionConfig, DbConfig, FederationConfig, HttpClientConfig,
JwtConfig, LoggerConfig, MediaConfig, OidcConfig, PresenceConfig, ProxyConfig,
ReadReceiptConfig, TurnConfig, TypingConfig, UrlPreviewConfig,
};
use crate::core::client::discovery::support::ContactRole;
use crate::core::serde::{default_false, default_true};
use crate::core::{OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId};
use crate::env_vars::required_var;
use crate::macros::config_example;
use crate::utils::sys;
use crate::{AppError, AppResult};
// Config keys that are no longer honored; currently none. Presumably
// consumed by `ServerConfig::warn_deprecated` (called from `check`) —
// confirm before adding entries.
const DEPRECATED_KEYS: &[&str; 0] = &[];
/// Values advertised for server/client discovery (`.well-known`).
#[derive(Clone, Debug, Deserialize, Default)]
pub struct WellKnownConfig {
    /// Base URL advertised to clients; when unset, palpo falls back to
    /// `https://{server_name}` (see `ServerConfig::well_known_client`).
    pub client: Option<String>,
    /// Delegated federation server name; when unset, palpo falls back to
    /// `{server_name}` (or `{host}:443` if the name carries no port — see
    /// `ServerConfig::well_known_server`).
    pub server: Option<OwnedServerName>,
    // The four `support_*` fields are presumably published as support
    // contact information (MSC1929-style discovery) — confirm against the
    // well-known handler.
    pub support_page: Option<Url>,
    pub support_role: Option<ContactRole>,
    pub support_email: Option<String>,
    pub support_mxid: Option<OwnedUserId>,
}
/// Statically configured server signing keypair.
#[derive(Clone, Debug, Deserialize, Default)]
pub struct KeypairConfig {
    // Serialized key material; exact format is defined by the key-loading
    // code, which is not visible here — confirm before editing.
    pub document: String,
    // Version label for the key (likely used as the key id suffix) —
    // assumption from the name; verify against usage.
    pub version: String,
}
/// One network listener: bind address, optional TLS, and reverse-proxy
/// header handling.
#[derive(Clone, Debug, Deserialize)]
pub struct ListenerConfig {
    /// The default address (IPv4 or IPv6) and port palpo will listen on.
    #[serde(default = "default_listen_address")]
    pub address: String,
    // When true, presumably trusts `X-Forwarded-*` headers from an upstream
    // reverse proxy — assumption from the name; confirm against the HTTP
    // server setup.
    #[serde(default)]
    pub x_forwarded: bool,
    // external structure; separate section
    /// Optional TLS settings; only honored when its `enable` flag is set
    /// (see `ListenerConfig::enabled_tls`).
    pub tls: Option<TlsConfig>,
}
impl Default for ListenerConfig {
fn default() -> Self {
Self {
address: default_listen_address(),
x_forwarded: false,
tls: None,
}
}
}
impl ListenerConfig {
    /// Returns the TLS section only when it is present *and* enabled.
    pub fn enabled_tls(&self) -> Option<&TlsConfig> {
        // `Option::filter` expresses the original nested
        // if-let / if / else ladder in one idiomatic step.
        self.tls.as_ref().filter(|tls| tls.enable)
    }
}
#[config_example(
    filename = "palpo-example.toml",
    undocumented = "# This item is undocumented. Please contribute documentation for it.",
    header = r#"### Palpo Configuration
###
### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE
### OVERWRITTEN!
###
### You should rename this file before configuring your server. Changes to
### documentation and defaults can be contributed in source code at
### crate/server/config/server.rs. This file is generated when building.
###
### Any values pre-populated are the default values for said config option.
###
### At the minimum, you MUST edit all the config options to your environment
### that say "YOU NEED TO EDIT THIS".
###
### For more information, see:
### https://palpo.im/guide/configuration.html
"#,
    ignore = "catch_others federation well_known compression typing read_receipt presence \
        admin url_preview turn media blurhash keypair ldap proxy jwt oidc logger db appservice"
)]
#[derive(Clone, Debug, Deserialize)]
pub struct ServerConfig {
    /// Sockets palpo accepts connections on; each entry configures a bind
    /// address, optional TLS, and reverse-proxy header handling.
    #[serde(default = "default_listener")]
    pub listeners: Vec<ListenerConfig>,
    /// The server_name is the pretty name of this server. It is used as a
    /// suffix for user and room IDs/aliases.
    /// YOU NEED TO EDIT THIS.
    ///
    /// example: "palpo.im"
    #[serde(default = "default_server_name")]
    pub server_name: OwnedServerName,
    // Presumably a public homepage URL for this server — undocumented;
    // confirm where it is used before relying on it.
    pub home_page: Option<String>,
    // display: hidden
    // Database settings (external structure; see `DbConfig`).
    #[serde(default)]
    pub db: DbConfig,
    // display: hidden
    // Enables checking for updates — the exact endpoint/mechanism is not
    // visible here; confirm.
    #[serde(default = "default_true")]
    pub allow_check_for_updates: bool,
    // Upper bound on concurrently processed requests — assumption from the
    // name; confirm against the server setup code.
    #[serde(default = "default_max_concurrent_requests")]
    pub max_concurrent_requests: u16,
    /// Text which will be added to the end of the user's displayname upon
    /// registration with a space before the text. In Conduit, this was the
    /// lightning bolt emoji.
    ///
    /// To disable, set this to "" (an empty string).
    ///
    /// default: "💕"
    #[serde(default = "default_new_user_displayname_suffix")]
    pub new_user_displayname_suffix: String,
    // /// The UNIX socket palpo will listen on.
    // ///
    // /// palpo cannot listen on both an IP address and a UNIX socket. If
    // /// listening on a UNIX socket, you MUST remove/comment the `address` key.
    // ///
    // /// Remember to make sure that your reverse proxy has access to this socket
    // /// file, either by adding your reverse proxy to the 'palpo' group or
    // /// granting world R/W permissions with `unix_socket_perms` (666 minimum).
    // ///
    // /// example: "/run/palpo/palpo.sock"
    // pub unix_socket_path: Option<PathBuf>,
    // /// The default permissions (in octal) to create the UNIX socket with.
    // ///
    // /// default: 660
    // #[serde(default = "default_unix_socket_perms")]
    // pub unix_socket_perms: u32,
    /// Enable to query all nameservers until the domain is found. Referred to
    /// as "trust_negative_responses" in hickory_resolver. This can avoid
    /// useless DNS queries if the first nameserver responds with NXDOMAIN or
    /// an empty NOERROR response.
    #[serde(default = "default_true")]
    pub query_all_nameservers: bool,
    /// Enable using *only* TCP for querying your specified nameservers instead
    /// of UDP.
    ///
    /// If you are running palpo in a container environment, this config
    /// option may need to be enabled. For more details, see:
    /// https://palpo.im/troubleshooting.html#potential-dns-issues-when-using-docker
    #[serde(default)]
    pub query_over_tcp_only: bool,
    /// DNS A/AAAA record lookup strategy
    ///
    /// Takes a number of one of the following options:
    /// 1 - Ipv4Only (Only query for A records, no AAAA/IPv6)
    ///
    /// 2 - Ipv6Only (Only query for AAAA records, no A/IPv4)
    ///
    /// 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever
    /// returns a successful response first)
    ///
    /// 4 - Ipv6thenIpv4 (Query for AAAA record, if that fails then query the A
    /// record)
    ///
    /// 5 - Ipv4thenIpv6 (Query for A record, if that fails then query the AAAA
    /// record)
    ///
    /// If you don't have IPv6 networking, then for better DNS performance it
    /// may be suitable to set this to Ipv4Only (1) as you will never ever use
    /// the AAAA record contents even if the AAAA record is successful instead
    /// of the A record.
    ///
    /// default: 5
    #[serde(default = "default_ip_lookup_strategy")]
    pub ip_lookup_strategy: u8,
    /// Max request size for file uploads in bytes. Defaults to 20MB.
    ///
    /// default: 20971520
    #[serde(default = "default_max_upload_size")]
    pub max_upload_size: u32,
    /// default: 192
    #[serde(default = "default_max_fetch_prev_events")]
    pub max_fetch_prev_events: u16,
    /// Default/base connection timeout. This is used only by URL
    /// previews and update/news endpoint checks.
    ///
    /// default: 10_000
    #[serde(default = "default_request_conn_timeout")]
    pub request_conn_timeout: u64,
    /// Default/base request timeout. The time waiting to receive more
    /// data from another server. This is used only by URL previews,
    /// update/news, and misc endpoint checks.
    ///
    /// default: 35_000
    #[serde(default = "default_request_timeout")]
    pub request_timeout: u64,
    /// Default/base request total timeout. The time limit for a whole
    /// request. This is set very high to not cancel healthy requests while
    /// serving as a backstop. This is used only by URL previews and update/news
    /// endpoint checks.
    ///
    /// default: 320_000
    #[serde(default = "default_request_total_timeout")]
    pub request_total_timeout: u64,
    /// Default/base idle connection pool timeout. This is used only
    /// by URL previews and update/news endpoint checks.
    ///
    /// default: 5_000
    #[serde(default = "default_request_idle_timeout")]
    pub request_idle_timeout: u64,
    /// Default/base max idle connections per host. This is used only by URL
    /// previews and update/news endpoint checks. Defaults to 1 as generally the
    /// same open connection can be re-used.
    ///
    /// default: 1
    #[serde(default = "default_request_idle_per_host")]
    pub request_idle_per_host: u16,
    /// Appservice URL request connection timeout. Defaults to 35 seconds as
    /// generally appservices are hosted within the same network.
    ///
    /// default: 35
    #[serde(default = "default_appservice_timeout")]
    pub appservice_timeout: u64,
    /// Appservice URL idle connection pool timeout
    ///
    /// default: 300_000
    #[serde(default = "default_appservice_idle_timeout")]
    pub appservice_idle_timeout: u64,
    /// Notification gateway pusher idle connection pool timeout.
    ///
    /// default: 15_000
    #[serde(default = "default_pusher_idle_timeout")]
    pub pusher_idle_timeout: u64,
    /// Maximum time to receive a request from a client
    ///
    /// default: 75_000
    #[serde(default = "default_client_receive_timeout")]
    pub client_receive_timeout: u64,
    /// Maximum time to process a request received from a client
    ///
    /// default: 180_000
    #[serde(default = "default_client_request_timeout")]
    pub client_request_timeout: u64,
    /// Maximum time to transmit a response to a client
    ///
    /// default: 120_000
    #[serde(default = "default_client_response_timeout")]
    pub client_response_timeout: u64,
    /// Grace period for clean shutdown of client requests.
    ///
    /// default: 10_000
    #[serde(default = "default_client_shutdown_timeout")]
    pub client_shutdown_timeout: u64,
    /// Grace period for clean shutdown of federation requests.
    ///
    /// default: 5_000
    #[serde(default = "default_sender_shutdown_timeout")]
    pub sender_shutdown_timeout: u64,
    /// Path to a file on the system that gets read for additional registration
    /// tokens. Multiple tokens can be added if you separate them with
    /// whitespace
    ///
    /// palpo must be able to access the file, and it must not be empty
    ///
    /// example: "/etc/palpo/.reg_token"
    pub registration_token_file: Option<PathBuf>,
    /// Always calls /forget on behalf of the user if leaving a room. This is a
    /// part of MSC4267 "Automatically forgetting rooms on leave"
    #[serde(default)]
    pub forget_forced_upon_leave: bool,
    /// Set this to true to require authentication on the normally
    /// unauthenticated profile retrieval endpoints (GET)
    /// "/_matrix/client/v3/profile/{user_id}".
    ///
    /// This can prevent profile scraping.
    #[serde(default)]
    pub require_auth_for_profile_requests: bool,
    /// Enables registration. If set to false, no users can register on this
    /// server.
    ///
    /// If set to true without a token configured, users can register with no
    /// form of 2nd-step only if you set the following option to true:
    /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
    ///
    /// If you would like registration only via token reg, please configure
    /// `registration_token` or `registration_token_file`.
    #[serde(default = "default_false")]
    pub allow_registration: bool,
    /// Enabling this setting opens registration to anyone without restrictions.
    /// This makes your server vulnerable to abuse
    #[serde(default)]
    pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool,
    /// A static registration token that new users will have to provide when
    /// creating an account. If unset and `allow_registration` is true,
    /// you must set
    /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`
    /// to true to allow open registration without any conditions.
    ///
    /// YOU NEED TO EDIT THIS OR USE registration_token_file.
    ///
    /// example: "o&^uCtes4HPf0Vu@F20jQeeWE7"
    ///
    /// display: sensitive
    pub registration_token: Option<String>,
    /// Controls whether encrypted rooms and events are allowed.
    #[serde(default = "default_true")]
    pub allow_encryption: bool,
    /// Allow standard users to create rooms. Appservices and admins are always
    /// allowed to create rooms
    #[serde(default = "default_true")]
    pub allow_room_creation: bool,
    /// Set to false to disable users from joining or creating room versions
    /// that aren't officially supported by palpo.
    ///
    /// palpo officially supports room versions 6 - 11.
    ///
    /// palpo has slightly experimental (though works fine in practice)
    /// support for versions 3 - 5.
    #[serde(default = "default_true")]
    pub allow_unstable_room_versions: bool,
    /// Default room version palpo will create rooms with.
    ///
    /// Per spec, room version 11 is the default.
    ///
    /// default: 11
    #[serde(default = "default_default_room_version")]
    pub default_room_version: RoomVersionId,
    // Enables Jaeger tracing export — presumably wired up in the logger /
    // telemetry setup; confirm.
    #[serde(default = "default_false")]
    pub allow_jaeger: bool,
    // Enables tracing-flame flamegraph output — presumably wired up in the
    // logger setup; confirm.
    #[serde(default = "default_false")]
    pub tracing_flame: bool,
    /// Whether the server admin room is enabled.
    #[serde(default = "default_true")]
    pub enable_admin_room: bool,
    // Directory scanned for appservice registration files — assumption from
    // the name; confirm against the appservice loading code.
    pub appservice_registration_dir: Option<String>,
    /// Servers listed here will be used to gather public keys of other servers
    /// (notary trusted key servers).
    ///
    /// Currently, palpo doesn't support inbound batched key requests, so
    /// this list should only contain other Synapse servers.
    ///
    /// example: ["matrix.org", "tchncs.de"]
    ///
    /// default: ["matrix.org"]
    #[serde(default = "default_trusted_servers")]
    pub trusted_servers: Vec<OwnedServerName>,
    /// OpenID token expiration/TTL.
    ///
    /// These are the OpenID tokens that are primarily used for Matrix account
    /// integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID
    /// Connect/etc.
    ///
    /// default: 3600
    #[serde(default = "default_openid_token_ttl")]
    pub openid_token_ttl: u64,
    /// Allow an existing session to mint a login token for another client.
    /// This requires interactive authentication, but has security ramifications
    /// as a malicious client could use the mechanism to spawn more than one
    /// session.
    /// Enabled by default.
    #[serde(default = "default_true")]
    pub login_via_existing_session: bool,
    /// Login token expiration/TTL in milliseconds.
    ///
    /// These are short-lived tokens for the m.login.token endpoint.
    /// This is used to allow existing sessions to create new sessions.
    /// see login_via_existing_session.
    ///
    /// default: 120000
    #[serde(default = "default_login_token_ttl")]
    pub login_token_ttl: u64,
    // TTL for refresh tokens — unit not documented here (the sibling
    // login_token_ttl is milliseconds); confirm against token issuance code.
    #[serde(default = "default_refresh_token_ttl")]
    pub refresh_token_ttl: u64,
    // TTL for sessions — unit not documented here; confirm against session
    // handling code.
    #[serde(default = "default_session_ttl")]
    pub session_ttl: u64,
    /// List/vector of room IDs or room aliases that palpo will make newly
    /// registered users join. The rooms specified must be rooms that you have
    /// joined at least once on the server, and must be public.
    ///
    /// example: ["#palpo:example.com",
    /// "!eoIzvAvVwY23LPDay8:example.com"]
    ///
    /// default: []
    #[serde(default = "Vec::new")]
    pub auto_join_rooms: Vec<OwnedRoomOrAliasId>,
    /// Config option to automatically deactivate the account of any user who
    /// attempts to join a:
    /// - banned room
    /// - forbidden room alias
    /// - room alias or ID with a forbidden server name
    ///
    /// This may be useful if all your banned lists consist of toxic rooms or
    /// servers that no good faith user would ever attempt to join, and
    /// to automatically remediate the problem without any admin user
    /// intervention.
    ///
    /// This will also make the user leave all rooms. Federation (e.g. remote
    /// room invites) are ignored here.
    ///
    /// Defaults to false as rooms can be banned for non-moderation-related
    /// reasons and this performs a full user deactivation.
    #[serde(default)]
    pub auto_deactivate_banned_room_attempts: bool,
    /// Block non-admin local users from sending room invites (local and
    /// remote), and block non-admin users from receiving remote room invites.
    ///
    /// Admins are always allowed to send and receive all room invites.
    #[serde(default)]
    pub block_non_admin_invites: bool,
    /// Set this to true to allow your server's public room directory to be
    /// federated. Set this to false to protect against /publicRooms spiders,
    /// but will forbid external users from viewing your server's public room
    /// directory. If federation is disabled entirely (`allow_federation`), this
    /// is inherently false.
    #[serde(default)]
    pub allow_public_room_directory_over_federation: bool,
    /// Set this to true to allow your server's public room directory to be
    /// queried without client authentication (access token) through the Client
    /// APIs. Set this to false to protect against /publicRooms spiders.
    #[serde(default)]
    pub allow_public_room_directory_without_auth: bool,
    /// Set this to true to lock down your server's public room directory and
    /// only allow admins to publish rooms to the room directory. Unpublishing
    /// is still allowed by all users with this enabled.
    #[serde(default)]
    pub lockdown_public_room_directory: bool,
    /// This is a password that can be configured that will let you login to the
    /// server bot account (currently `@conduit`) for emergency troubleshooting
    /// purposes such as recovering/recreating your admin room, or inviting
    /// yourself back.
    ///
    /// See https://palpo.im/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
    ///
    /// Once this password is unset, all sessions will be logged out for
    /// security purposes.
    ///
    /// example: "x7k9m2p5#n8w1%q4r6"
    ///
    /// display: sensitive
    pub emergency_password: Option<String>,
    /// default: "/_matrix/push/v1/notify"
    #[serde(default = "default_notification_push_path")]
    pub notification_push_path: String,
    /// Set to true to allow user type "guest" registrations. Some clients like
    /// Element attempt to register guest users automatically.
    #[serde(default)]
    pub allow_guest_registration: bool,
    /// Set to true to log guest registrations in the admin room. Note that
    /// these may be noisy or unnecessary if you're a public homeserver.
    #[serde(default)]
    pub log_guest_registrations: bool,
    /// Set to true to allow guest registrations/users to auto join any rooms
    /// specified in `auto_join_rooms`.
    #[serde(default)]
    pub allow_guests_auto_join_rooms: bool,
    /// List of forbidden server names via regex patterns that we will block
    /// incoming AND outgoing federation with, and block client room joins /
    /// remote user invites.
    ///
    /// This check is applied on the room ID, room alias, sender server name,
    /// sender user's server name, inbound federation X-Matrix origin, and
    /// outbound federation handler.
    ///
    /// Basically "global" ACLs.
    ///
    /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
    ///
    /// default: []
    #[serde(default, with = "serde_regex")]
    pub forbidden_remote_server_names: RegexSet,
    /// List of forbidden server names via regex patterns that we will block all
    /// outgoing federated room directory requests for. Useful for preventing
    /// our users from wandering into bad servers or spaces.
    ///
    /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
    ///
    /// default: []
    #[serde(default, with = "serde_regex")]
    pub forbidden_remote_room_directory_server_names: RegexSet,
    /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
    /// do not want palpo to send outbound requests to. Defaults to
    /// RFC1918, unroutable, loopback, multicast, and testnet addresses for
    /// security.
    ///
    /// Please be aware that this is *not* a guarantee. You should be using a
    /// firewall with zones as doing this on the application layer may have
    /// bypasses.
    ///
    /// Currently this does not account for proxies in use like Synapse does.
    ///
    /// To disable, set this to be an empty vector (`[]`).
    ///
    /// Defaults to:
    /// ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12",
    /// "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16",
    /// "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24",
    /// "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7",
    /// "2001:db8::/32", "ff00::/8", "fec0::/10"]
    #[serde(default = "default_ip_range_denylist")]
    pub ip_range_denylist: Vec<String>,
    // Filesystem path used as palpo's data/space directory — assumption from
    // the name; confirm against usage.
    #[serde(default = "default_space_path")]
    pub space_path: String,
    // pub auto_acme: Option<AcmeConfig>,
    /// Whether to query the servers listed in trusted_servers first or query
    /// the origin server first. For best security, querying the origin server
    /// first is advised to minimize the exposure to a compromised trusted
    /// server. For maximum federation/join performance this can be set to true,
    /// however other options exist to query trusted servers first under
    /// specific high-load circumstances and should be evaluated before setting
    /// this to true.
    #[serde(default)]
    pub query_trusted_key_servers_first: bool,
    /// Whether to query the servers listed in trusted_servers first
    /// specifically on room joins. This option limits the exposure to a
    /// compromised trusted server to room joins only. The join operation
    /// requires gathering keys from many origin servers which can cause
    /// significant delays. Therefor this defaults to true to mitigate
    /// unexpected delays out-of-the-box. The security-paranoid or those
    /// willing to tolerate delays are advised to set this to false. Note that
    /// setting query_trusted_key_servers_first to true causes this option to
    /// be ignored.
    #[serde(default = "default_true")]
    pub query_trusted_key_servers_first_on_join: bool,
    /// Only query trusted servers for keys and never the origin server. This is
    /// intended for clusters or custom deployments using their trusted_servers
    /// as forwarding-agents to cache and deduplicate requests. Notary servers
    /// do not act as forwarding-agents by default, therefor do not enable this
    /// unless you know exactly what you are doing.
    #[serde(default)]
    pub only_query_trusted_key_servers: bool,
    /// Maximum number of keys to request in each trusted server batch query.
    ///
    /// default: 1024
    #[serde(default = "default_trusted_server_batch_size")]
    pub trusted_server_batch_size: usize,
    /// List of forbidden room aliases and room IDs as strings of regex
    /// patterns.
    ///
    /// Regex can be used or explicit contains matches can be done by just
    /// specifying the words (see example).
    ///
    /// This is checked upon room alias creation, custom room ID creation if
    /// used, and startup as warnings if any room aliases in your database have
    /// a forbidden room alias/ID.
    ///
    /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"]
    ///
    /// default: []
    #[serde(default, with = "serde_regex")]
    pub forbidden_alias_names: RegexSet,
    /// List of forbidden username patterns/strings.
    ///
    /// Regex can be used or explicit contains matches can be done by just
    /// specifying the words (see example).
    ///
    /// This is checked upon username availability check, registration, and
    /// startup as warnings if any local users in your database have a forbidden
    /// username.
    ///
    /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"]
    ///
    /// default: []
    #[serde(default, with = "serde_regex")]
    pub forbidden_usernames: RegexSet,
    /// Retry failed and incomplete messages to remote servers immediately upon
    /// startup. This is called bursting. If this is disabled, said messages may
    /// not be delivered until more messages are queued for that server. Do not
    /// change this option unless server resources are extremely limited or the
    /// scale of the server's deployment is huge. Do not disable this unless you
    /// know what you are doing.
    #[serde(default = "default_true")]
    pub startup_netburst: bool,
    /// Messages are dropped and not reattempted. The `startup_netburst` option
    /// must be enabled for this value to have any effect. Do not change this
    /// value unless you know what you are doing. Set this value to -1 to
    /// reattempt every message without trimming the queues; this may consume
    /// significant disk. Set this value to 0 to drop all messages without any
    /// attempt at redelivery.
    ///
    /// default: 50
    #[serde(default = "default_startup_netburst_keep")]
    pub startup_netburst_keep: i64,
    // external structure; separate section
    #[serde(default)]
    pub logger: LoggerConfig,
    // external structure; separate section
    pub jwt: Option<JwtConfig>,
    // external structure; separate section
    pub proxy: Option<ProxyConfig>,
    // // external structure; separate section
    // pub ldap: Option<LdapConfig>,
    // external structure; separate section
    // display: hidden
    pub keypair: Option<KeypairConfig>,
    // external structure; separate section
    #[serde(default)]
    pub blurhash: BlurhashConfig,
    // external structure; separate section
    #[serde(default)]
    pub media: MediaConfig,
    // external structure; separate section
    pub turn: Option<TurnConfig>,
    // external structure; separate section
    #[serde(default)]
    pub url_preview: UrlPreviewConfig,
    // external structure; separate section
    #[serde(default)]
    pub admin: AdminConfig,
    // external structure; separate section
    #[serde(default)]
    pub presence: PresenceConfig,
    // external structure; separate section
    // display: hidden
    #[serde(default)]
    pub read_receipt: ReadReceiptConfig,
    // external structure; separate section
    #[serde(default)]
    pub typing: TypingConfig,
    // external structure; separate section
    #[serde(default)]
    pub compression: CompressionConfig,
    // external structure; separate section
    #[serde(default)]
    pub well_known: WellKnownConfig,
    // external structure; separate section
    #[serde(default)]
    pub federation: FederationConfig,
    // external structure; separate section
    #[serde(default)]
    pub http_client: HttpClientConfig,
    /// Enables configuration reload when the server receives SIGUSR1 on
    /// supporting platforms.
    ///
    /// default: true
    #[serde(default = "default_true")]
    pub config_reload_signal: bool,
    /// Toggles ignore checking/validating TLS certificates
    ///
    /// This applies to everything, including URL previews, federation requests,
    /// etc. This is a hidden argument that should NOT be used in production as
    /// it is highly insecure and I will personally yell at you if I catch you
    /// using this.
    #[serde(default)]
    pub allow_invalid_tls_certificates: bool,
    /// Number of sender task workers; determines sender parallelism. Default is
    /// '0' which means the value is determined internally, likely matching the
    /// number of tokio worker-threads or number of cores, etc. Override by
    /// setting a non-zero value.
    ///
    /// default: 0
    #[serde(default)]
    pub sender_workers: usize,
    // external structure; separate section
    pub oidc: Option<OidcConfig>,
    // // external structure; separate section
    // #[serde(default)]
    // pub appservice: BTreeMap<String, AppService>,
    #[serde(flatten)]
    #[allow(clippy::zero_sized_map_values)]
    // this is a catchall, the map shouldn't be zero at runtime
    catch_others: BTreeMap<String, IgnoredAny>,
}
impl ServerConfig {
// pub fn enabled_ldap(&self) -> Option<&LdapConfig> {
// if let Some(ldap) = self.ldap.as_ref() {
// if ldap.enable { Some(ldap) } else { None }
// } else {
// None
// }
// }
/// Returns the JWT section only when it is present *and* enabled.
pub fn enabled_jwt(&self) -> Option<&JwtConfig> {
    // `Option::filter` replaces the nested if-let / if / else ladder.
    self.jwt.as_ref().filter(|jwt| jwt.enable)
}
/// Returns the OIDC section only when it is present *and* enabled.
pub fn enabled_oidc(&self) -> Option<&OidcConfig> {
    // `Option::filter` replaces the nested if-let / if / else ladder.
    self.oidc.as_ref().filter(|oidc| oidc.enable)
}
/// Returns the TURN section only when it is present *and* enabled.
pub fn enabled_turn(&self) -> Option<&TurnConfig> {
    // `Option::filter` replaces the nested if-let / if / else ladder.
    self.turn.as_ref().filter(|turn| turn.enable)
}
/// The federation section, or `None` when federation is disabled.
pub fn enabled_federation(&self) -> Option<&FederationConfig> {
    self.federation.enable.then_some(&self.federation)
}
/// Base URL advertised via `.well-known` to clients: the configured value,
/// or `https://{server_name}` when none is set.
pub fn well_known_client(&self) -> String {
    // NOTE: when palpo sits behind a reverse proxy we cannot tell whether
    // the public scheme is plain http, so https is assumed.
    match &self.well_known.client {
        Some(url) => url.to_string(),
        None => format!("https://{}", self.server_name),
    }
}
/// Delegated federation server name: the configured value, the server name
/// itself when it already carries a port, or `{host}:443` otherwise.
pub fn well_known_server(&self) -> OwnedServerName {
    if let Some(server_name) = &self.well_known.server {
        return server_name.to_owned();
    }
    if self.server_name.port().is_some() {
        return self.server_name.to_owned();
    }
    format!("{}:443", self.server_name.host())
        .try_into()
        .expect("Host from valid hostname + :443 must be valid")
}
pub fn check(&self) -> AppResult<()> {
if cfg!(debug_assertions) {
tracing::warn!("Note: palpo was built without optimisations (i.e. debug build)");
}
// if self
// .allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure
// {
// tracing::warn!(
// "\n\nWARNING: \n\nTLS CERTIFICATE VALIDATION IS DISABLED, THIS IS HIGHLY INSECURE AND SHOULD NOT BE USED IN PRODUCTION.\n\n"
// );
// }
self.warn_deprecated();
self.warn_unknown_key();
// if self.sentry && self.sentry_endpoint.is_none() {
// return Err(AppError::internal(
// "sentry_endpoint",
// "Sentry cannot be enabled without an endpoint set"
// ));
// }
// if cfg!(all(
// feature = "hardened_malloc",
// feature = "jemalloc",
// not(target_env = "msvc")
// )) {
// tracing::warn!(
// "hardened_malloc and jemalloc compile-time features are both enabled, this causes \
// jemalloc to be used."
// );
// }
// if cfg!(not(unix)) && self.unix_socket_path.is_some() {
// return Err(AppError::internal(
// "UNIX socket support is only available on *nix platforms. Please remove \
// 'unix_socket_path' from your config.",
// ));
// }
// if self.unix_socket_path.is_none() && self.get_bind_hosts().is_empty() {
// return Err(AppError::internal("No TCP addresses were specified to listen on"));
// }
// if self.unix_socket_path.is_none() && self.get_bind_ports().is_empty() {
// return EErr(AppError::internal("No ports were specified to listen on"));
// }
// if self.unix_socket_path.is_none() {
// self.get_bind_addrs().iter().for_each(|addr| {
// use std::path::Path;
// if addr.ip().is_loopback() {
// tracing::info!(
// "Found loopback listening address {addr}, running checks if we're in a \
// container."
// );
// if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists()
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | true |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/compression.rs | crates/server/src/config/compression.rs | use serde::Deserialize;
use crate::macros::config_example;
// HTTP response-body compression toggles; all codecs default to off.
// `is_enabled()` reports whether any of them is on.
#[config_example(filename = "palpo-example.toml", section = "compression")]
#[derive(Clone, Debug, Deserialize, Default)]
pub struct CompressionConfig {
    /// Set this to true for palpo to compress HTTP response bodies using
    /// zstd.
    #[serde(default)]
    pub enable_zstd: bool,
    /// Set this to true for palpo to compress HTTP response bodies using
    /// gzip.
    ///
    /// If you are in a large amount of rooms, you may find that enabling this
    /// is necessary to reduce the significantly large response bodies.
    #[serde(default)]
    pub enable_gzip: bool,
    /// Set this to true for palpo to compress HTTP response bodies using
    /// brotli.
    #[serde(default)]
    pub enable_brotli: bool,
}
impl CompressionConfig {
    /// True when at least one compression codec is switched on.
    pub fn is_enabled(&self) -> bool {
        [self.enable_zstd, self.enable_gzip, self.enable_brotli]
            .into_iter()
            .any(|enabled| enabled)
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/cache.rs | crates/server/src/config/cache.rs | use serde::Deserialize;
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "cache")]
#[derive(Clone, Debug, Deserialize, Default)]
// Placeholder: no cache options are exposed yet; the section is reserved in
// the generated example config.
pub struct CacheConfig {}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/proxy.rs | crates/server/src/config/proxy.rs | use reqwest::{Proxy, Url};
use serde::Deserialize;
use crate::AppResult;
/// ## Examples:
/// - No proxy (default):
/// ```toml
/// proxy ="none"
/// ```
/// - Global proxy
/// ```toml
/// [global.proxy]
/// global = { url = "socks5h://localhost:9050" }
/// ```
/// - Proxy some domains
/// ```toml
/// [global.proxy]
/// [[global.proxy.by_domain]]
/// url = "socks5h://localhost:9050"
/// include = ["*.onion", "matrix.myspecial.onion"]
/// exclude = ["*.myspecial.onion"]
/// ```
/// ## Include vs. Exclude
/// If include is an empty list, it is assumed to be `["*"]`.
///
/// If a domain matches both the exclude and include list, the proxy will only be used if it was
/// included because of a more specific rule than it was excluded. In the above example, the proxy
/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
#[derive(Default, Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ProxyConfig {
    /// No proxying (the default).
    #[default]
    None,
    /// One proxy URL used for every outbound request.
    Global {
        #[serde(deserialize_with = "crate::utils::deserialize_from_str")]
        url: Url,
    },
    /// Per-domain proxy rules; the first matching rule wins (see the
    /// include/exclude semantics documented above).
    ByDomain(Vec<PartialProxyConfig>),
}
impl ProxyConfig {
    /// Builds a [`reqwest::Proxy`] from this config.
    ///
    /// Returns `Ok(None)` for [`ProxyConfig::None`]. Only the data each
    /// variant actually needs is cloned; the previous implementation cloned
    /// the entire config (including the full `ByDomain` rule list) up front
    /// even when no proxy was configured.
    ///
    /// # Errors
    /// Propagates `reqwest` errors from [`Proxy::all`] for a global proxy.
    pub fn to_proxy(&self) -> AppResult<Option<Proxy>> {
        match self {
            ProxyConfig::None => Ok(None),
            ProxyConfig::Global { url } => Ok(Some(Proxy::all(url.clone())?)),
            ProxyConfig::ByDomain(proxies) => {
                // `Proxy::custom` requires a 'static closure, so the rules
                // are cloned into it; first matching rule wins.
                let proxies = proxies.clone();
                Ok(Some(Proxy::custom(move |url| {
                    proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned()
                })))
            }
        }
    }
}
/// One per-domain proxy rule: the proxy URL plus the wildcard domain lists
/// that decide which URLs it applies to (see [`PartialProxyConfig::for_url`]).
#[derive(Clone, Debug, Deserialize)]
pub struct PartialProxyConfig {
    /// Proxy used when a URL's domain matches this rule.
    #[serde(deserialize_with = "crate::utils::deserialize_from_str")]
    url: Url,
    /// Domains this proxy applies to; an empty list is treated as `["*"]`.
    #[serde(default)]
    include: Vec<WildCardedDomain>,
    /// Domains exempted, unless also included by a more specific pattern.
    #[serde(default)]
    exclude: Vec<WildCardedDomain>,
}
impl PartialProxyConfig {
pub fn for_url(&self, url: &Url) -> Option<&Url> {
let domain = url.domain()?;
let mut included_because = None; // most specific reason it was included
let mut excluded_because = None; // most specific reason it was excluded
if self.include.is_empty() {
// treat empty include list as `*`
included_because = Some(&WildCardedDomain::WildCard)
}
for wc_domain in &self.include {
if wc_domain.matches(domain) {
match included_because {
Some(prev) if !wc_domain.more_specific_than(prev) => (),
_ => included_because = Some(wc_domain),
}
}
}
for wc_domain in &self.exclude {
if wc_domain.matches(domain) {
match excluded_because {
Some(prev) if !wc_domain.more_specific_than(prev) => (),
_ => excluded_because = Some(wc_domain),
}
}
}
match (included_because, excluded_because) {
(Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded
(Some(_), None) => Some(&self.url),
_ => None,
}
}
}
/// A domain pattern: an exact name, a `*.`-prefixed wildcard covering
/// subdomains, or a bare `*` covering everything.
#[derive(Clone, Debug)]
pub enum WildCardedDomain {
    WildCard,
    WildCarded(String),
    Exact(String),
}

impl WildCardedDomain {
    /// Whether `domain` is covered by this pattern.
    pub fn matches(&self, domain: &str) -> bool {
        match self {
            Self::WildCard => true,
            Self::WildCarded(suffix) => domain.ends_with(suffix),
            Self::Exact(exact) => domain == exact,
        }
    }

    /// Whether this pattern is strictly more specific than `other`:
    /// exact beats a wildcard that covers it, a longer wildcard suffix beats
    /// a shorter one, and anything beats the bare `*`.
    pub fn more_specific_than(&self, other: &Self) -> bool {
        match (self, other) {
            // The bare wildcard is never more specific than itself...
            (Self::WildCard, Self::WildCard) => false,
            // ...but everything else beats it.
            (_, Self::WildCard) => true,
            // An exact domain beats a wildcard that covers it.
            (Self::Exact(exact), Self::WildCarded(_)) => other.matches(exact),
            // A longer wildcard suffix is more specific than a shorter one.
            (Self::WildCarded(a), Self::WildCarded(b)) => a != b && a.ends_with(b),
            _ => false,
        }
    }
}

impl std::str::FromStr for WildCardedDomain {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // No domain validation is performed here.
        Ok(match s {
            "*" => Self::WildCarded(String::new()),
            _ if s.starts_with("*.") => Self::WildCarded(s[1..].to_owned()),
            _ => Self::Exact(s.to_owned()),
        })
    }
}
impl<'de> Deserialize<'de> for WildCardedDomain {
    // Deserialize from the string form ("*", "*.domain", or an exact domain)
    // by delegating to this type's `FromStr` implementation.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        crate::utils::deserialize_from_str(deserializer)
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/turn.rs | crates/server/src/config/turn.rs | use std::path::PathBuf;
use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "turn")]
#[derive(Clone, Debug, Deserialize, Default)]
pub struct TurnConfig {
    /// Whether TURN support is enabled.
    #[serde(default = "default_true")]
    pub enable: bool,
    /// Static TURN username to provide the client if not using a shared secret
    /// ("turn_secret"). It is recommended to use a shared secret over static
    /// credentials.
    #[serde(default)]
    pub username: String,
    /// Static TURN password to provide the client if not using a shared secret
    /// ("turn_secret"). It is recommended to use a shared secret over static
    /// credentials.
    ///
    /// display: sensitive
    #[serde(default)]
    pub password: String,
    /// Vector list of TURN URIs/servers to use.
    ///
    /// Replace "example.turn.uri" with your TURN domain, such as the coturn
    /// "realm" config option. If using TURN over TLS, replace the URI prefix
    /// "turn:" with "turns:".
    ///
    /// example: ["turn:example.turn.uri?transport=udp",
    /// "turn:example.turn.uri?transport=tcp"]
    ///
    /// default: []
    #[serde(default = "Vec::new")]
    pub uris: Vec<String>,
    /// TURN secret to use for generating the HMAC-SHA1 hash apart of username
    /// and password generation.
    ///
    /// This is more secure, but if needed you can use traditional static
    /// username/password credentials.
    ///
    /// display: sensitive
    #[serde(default)]
    pub secret: String,
    /// TURN secret to use that's read from the file path specified.
    ///
    /// If set, this takes priority over "secret"; if the file is invalid or
    /// cannot be opened, "secret" is used as the fallback.
    ///
    /// example: "/etc/palpo/.turn_secret"
    pub secret_file: Option<PathBuf>,
    /// TURN TTL, in seconds.
    ///
    /// default: 86400
    #[serde(default = "default_ttl")]
    pub ttl: u64,
    /// Allow guests/unauthenticated users to access TURN credentials.
    ///
    /// This is the equivalent of Synapse's `turn_allow_guests` config option.
    /// This allows any unauthenticated user to call the endpoint
    /// `/_matrix/client/v3/voip/turnServer`.
    ///
    /// It is unlikely you need to enable this as all major clients support
    /// authentication for this endpoint and prevents misuse of your TURN server
    /// from potential bots.
    #[serde(default)]
    pub allow_guests: bool,
}
/// Default TURN credential TTL: one day, expressed in seconds.
fn default_ttl() -> u64 {
    86_400
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/config/account.rs | crates/server/src/config/account.rs | use serde::Deserialize;
use crate::core::serde::default_true;
use crate::macros::config_example;
#[config_example(filename = "palpo-example.toml", section = "admin")]
#[derive(Clone, Debug, Deserialize)]
pub struct AccountConfig {
}
impl Default for AccountConfig {
fn default() -> Self {
Self {
}
}
} | rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/federation/access_check.rs | crates/server/src/federation/access_check.rs | use crate::AppResult;
use crate::core::{EventId, MatrixError, RoomId, ServerName};
use crate::event::handler;
use crate::room::{self, state};
/// Checks whether `origin` is allowed to access data about `room_id`
/// (and, when given, about a specific `event_id` in that room).
///
/// Performed checks, in order:
/// 1. `origin` must be in the room (per `room::is_server_joined`);
/// 2. `origin` must pass the room's server ACL;
/// 3. if `event_id` is given, `state::server_can_see_event` must allow
///    `origin` to see that event.
///
/// Returns a `forbidden` Matrix error when any check fails.
pub fn access_check(
    origin: &ServerName,
    room_id: &RoomId,
    event_id: Option<&EventId>,
) -> AppResult<()> {
    if !room::is_server_joined(origin, room_id)? {
        return Err(MatrixError::forbidden(
            format!("server `{origin}` is not in room `{room_id}`"),
            None,
        )
        .into());
    }
    handler::acl_check(origin, room_id)?;
    // let world_readable = crate::room::is_world_readable(room_id);
    // if any user on our homeserver is trying to knock this room, we'll need to
    // acknowledge bans or leaves
    // let user_is_knocking = crate::room::members_knocked(room_id).count();
    if let Some(event_id) = event_id
        && !state::server_can_see_event(origin, room_id, event_id)?
    {
        return Err(MatrixError::forbidden("server is not allowed to see event", None).into());
    }
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/federation/membership.rs | crates/server/src/federation/membership.rs | mod join;
pub use join::*;
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/federation/membership/join.rs | crates/server/src/federation/membership/join.rs | use crate::core::events::StateEventType;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::federation::membership::{RoomStateV1, RoomStateV2};
use crate::core::identifiers::*;
use crate::core::serde::{CanonicalJsonValue, RawJsonValue, to_raw_json_value};
use crate::event::{gen_event_id_canonical_json, handler};
use crate::room::{state, timeline};
use crate::{AppResult, IsRemoteOrLocal, MatrixError, room, sending};
/// Handles a federation `/send_join` (v1) request for `room_id`.
///
/// Validates the submitted join `pdu` (canonical-json shape, event type,
/// membership, sender/origin consistency, restricted-join authorisation),
/// signs it, feeds it through the incoming-PDU pipeline, and returns the
/// room state and auth chain from *before* the join.
///
/// Fix in this revision: the restricted-room error messages used `{…}`
/// placeholders inside plain string literals, so the literal braces were
/// returned to callers; they are now built with `format!` like the other
/// error messages in this function.
///
/// # Errors
/// Returns a Matrix error when the room is unknown, the origin fails the
/// server ACL, the event is malformed, or the join is not authorised.
pub async fn send_join_v1(
    origin: &ServerName,
    room_id: &RoomId,
    pdu: &RawJsonValue,
) -> AppResult<RoomStateV1> {
    if !room::room_exists(room_id)? {
        return Err(MatrixError::not_found("room is unknown to this server.").into());
    }

    handler::acl_check(origin, room_id)?;

    // We must reply with the state *prior* to joining, so capture the current
    // state frame before the join event is processed below.
    let frame_id = room::get_frame_id(room_id, None).unwrap_or_default();

    // We do not add the event_id field to the pdu here because of signature
    // and hashes checks.
    let room_version_id = room::get_version(room_id)?;
    let (event_id, mut value) = gen_event_id_canonical_json(pdu, &room_version_id)
        .map_err(|_| MatrixError::invalid_param("could not convert event to canonical json"))?;

    let event_room_id: OwnedRoomId = serde_json::from_value(
        value
            .get("room_id")
            .ok_or_else(|| MatrixError::bad_json("event missing room_id property"))?
            .clone()
            .into(),
    )
    .map_err(|e| MatrixError::bad_json(format!("room_id field is not a valid room id: {e}")))?;

    if event_room_id != room_id {
        return Err(
            MatrixError::bad_json("event room_id does not match request path room id").into(),
        );
    }

    let event_type: StateEventType = serde_json::from_value(
        value
            .get("type")
            .ok_or_else(|| MatrixError::bad_json("event missing type property"))?
            .clone()
            .into(),
    )
    .map_err(|e| MatrixError::bad_json(format!("event has invalid state event type: {e}")))?;

    if event_type != StateEventType::RoomMember {
        return Err(MatrixError::bad_json(
            "Not allowed to send non-membership state event to join endpoint.",
        )
        .into());
    }

    let content: RoomMemberEventContent = serde_json::from_value(
        value
            .get("content")
            .ok_or_else(|| MatrixError::bad_json("event missing content property"))?
            .clone()
            .into(),
    )
    .map_err(|e| MatrixError::bad_json(format!("event content is empty or invalid: {e}")))?;

    if content.membership != MembershipState::Join {
        return Err(MatrixError::bad_json(
            "not allowed to send a non-join membership event to join endpoint",
        )
        .into());
    }

    // ACL check sender user server name
    let sender: OwnedUserId = serde_json::from_value(
        value
            .get("sender")
            .ok_or_else(|| MatrixError::bad_json("event missing sender property"))?
            .clone()
            .into(),
    )
    .map_err(|e| MatrixError::bad_json(format!("sender property is not a valid user id: {e}")))?;

    if room::user::is_banned(&sender, room_id)? {
        return Err(MatrixError::forbidden("user is banned from the room", None).into());
    }

    handler::acl_check(sender.server_name(), room_id)?;

    // check if origin server is trying to send for another server
    if sender.server_name() != origin {
        return Err(MatrixError::forbidden(
            "not allowed to join on behalf of another server",
            None,
        )
        .into());
    }

    let state_key: OwnedUserId = serde_json::from_value(
        value
            .get("state_key")
            .ok_or_else(|| MatrixError::bad_json("event missing state_key property"))?
            .clone()
            .into(),
    )
    .map_err(|e| MatrixError::bad_json(format!("state key is not a valid user id: {e}")))?;

    // For a join, the membership event's state key must be the sender itself.
    if state_key != sender {
        return Err(MatrixError::bad_json("state key does not match sender user").into());
    };

    if let Some(authorising_user) = content.join_authorized_via_users_server {
        use crate::core::RoomVersionId::*;

        // Room versions 1-7 do not support restricted rooms.
        if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
            return Err(MatrixError::invalid_param(format!(
                "room version {room_version_id} does not support restricted rooms but \
                 join_authorised_via_users_server ({authorising_user}) was found in the event"
            ))
            .into());
        }

        if !authorising_user.is_local() {
            return Err(MatrixError::invalid_param(format!(
                "cannot authorise membership event through {authorising_user} as they do not \
                 belong to this homeserver"
            ))
            .into());
        }

        if !room::user::is_joined(&authorising_user, room_id)? {
            return Err(MatrixError::invalid_param(format!(
                "authorising user {authorising_user} is not in the room you are trying to join, \
                 they cannot authorise your join"
            ))
            .into());
        }

        if !crate::federation::user_can_perform_restricted_join(
            &state_key,
            room_id,
            &room_version_id,
            None,
        )
        .await?
        {
            return Err(MatrixError::unable_to_authorize_join(
                "joining user did not pass restricted room's rules",
            )
            .into());
        }
    }

    crate::server_key::hash_and_sign_event(&mut value, &room_version_id)
        .map_err(|e| MatrixError::invalid_param(format!("failed to sign send_join event: {e}")))?;

    // Note: this shadows the `origin` parameter with the event's own `origin`
    // field; the PDU pipeline below is invoked with this value.
    let origin: OwnedServerName = serde_json::from_value(
        serde_json::to_value(
            value
                .get("origin")
                .ok_or(MatrixError::invalid_param("event needs an origin field"))?,
        )
        .expect("CanonicalJson is valid json value"),
    )
    .map_err(|_| MatrixError::invalid_param("origin field is invalid"))?;

    handler::process_incoming_pdu(
        &origin,
        &event_id,
        room_id,
        &room_version_id,
        value.clone(),
        true,
        false,
    )
    .await?;

    // Collect the pre-join state (via the frame captured above) and its auth
    // chain for the response.
    let state_ids = state::get_full_state_ids(frame_id)?;
    let state = state_ids
        .iter()
        .filter_map(|(_, id)| timeline::get_pdu_json(id).ok().flatten())
        .map(crate::sending::convert_to_outgoing_federation_event)
        .collect();

    let auth_chain_ids =
        room::auth_chain::get_auth_chain_ids(room_id, state_ids.values().map(|id| &**id))?;
    let auth_chain = auth_chain_ids
        .into_iter()
        .filter_map(|id| timeline::get_pdu_json(&id).ok().flatten())
        .map(crate::sending::convert_to_outgoing_federation_event)
        .collect();

    // Fan the join out to the rest of the room; a failure here is logged but
    // must not fail the join itself.
    if let Err(e) = sending::send_pdu_room(room_id, &event_id, &[], &[origin.to_owned()]) {
        error!("failed to notify user joined to servers: {e}");
    }

    Ok(RoomStateV1 {
        auth_chain,
        state,
        event: to_raw_json_value(&CanonicalJsonValue::Object(value)).ok(),
    })
}
/// Handles a federation `/send_join` (v2) request.
///
/// Identical to [`send_join_v1`] except for the response envelope, which
/// additionally carries `members_omitted` / `servers_in_room` — both left at
/// their "nothing omitted" defaults here.
pub async fn send_join_v2(
    origin: &ServerName,
    room_id: &RoomId,
    pdu: &RawJsonValue,
) -> AppResult<RoomStateV2> {
    let v1 = send_join_v1(origin, room_id, pdu).await?;
    Ok(RoomStateV2 {
        members_omitted: false,
        auth_chain: v1.auth_chain,
        state: v1.state,
        event: v1.event,
        servers_in_room: None,
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.