repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/appservice.rs | crates/server/src/routing/appservice.rs | //! (De)serializable types for the [Matrix Server-Server API][federation-api].
//! These types are used by server code.
//!
//! [federation-api]: https://spec.matrix.org/latest/application-service-api/
mod third_party;
mod transaction;
use salvo::prelude::*;
use crate::{EmptyResult, empty_ok};
/// Builds the `/app/v1` appservice router: ping, room-alias and user-id
/// queries, third-party lookups, and transaction pushes from the homeserver.
pub fn router() -> Router {
    Router::with_path("app").oapi_tag("appservice").push(
        Router::with_path("v1")
            .push(Router::with_path("ping").post(ping))
            .push(Router::with_path("rooms/{room_alias}").get(query_rooms))
            .push(Router::with_path("users/{user_id}").get(query_users))
            .push(third_party::router())
            .push(transaction::router()),
    )
}
/// `POST /_matrix/app/v1/ping` — stub; always returns an empty 200 until ping
/// handling is implemented.
#[endpoint]
async fn ping() -> EmptyResult {
    // TODO: ping
    // let _authed = depot.authed_info()?;
    empty_ok()
}
/// `GET /_matrix/app/v1/rooms/{room_alias}` — stub; always returns an empty
/// 200 until room-alias queries are implemented.
#[endpoint]
async fn query_rooms() -> EmptyResult {
    // TODO: query_rooms
    // let _authed = depot.authed_info()?;
    empty_ok()
}
/// `GET /_matrix/app/v1/users/{user_id}` — stub; always returns an empty 200
/// until user-id queries are implemented.
#[endpoint]
async fn query_users() -> EmptyResult {
    // TODO: query_users
    // let _authed = depot.authed_info()?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/media.rs | crates/server/src/routing/media.rs | use salvo::prelude::*;
use super::client::media::*;
use crate::hoops;
/// Builds the legacy `/media` router, mounting the same tree under the `v3`,
/// `v1` and `r0` version prefixes.
///
/// Upload/config/preview routes require an access token; download and
/// thumbnail routes are served without one.
pub fn router() -> Router {
    let mut media = Router::with_path("media").oapi_tag("media");
    for v in ["v3", "v1", "r0"] {
        media = media
            .push(
                Router::with_path(v)
                    .hoop(hoops::auth_by_access_token)
                    .push(Router::with_path("create").post(create_mxc_uri))
                    .push(
                        Router::with_path("upload").post(create_content).push(
                            Router::with_path("{server_name}/{media_id}").put(upload_content),
                        ),
                    )
                    .push(
                        Router::with_hoop(hoops::limit_rate)
                            .push(Router::with_path("config").get(get_config))
                            .push(Router::with_path("preview_url").get(preview_url)),
                    ),
            )
            .push(
                Router::with_path(v)
                    .push(
                        Router::with_path("download/{server_name}/{media_id}")
                            .get(get_content)
                            // Spec route is
                            // `GET /_matrix/media/v3/download/{serverName}/{mediaId}/{fileName}`;
                            // the previous literal segment "(unknown)" could never match a
                            // real filename, making `get_content_with_filename` unreachable.
                            .push(Router::with_path("{filename}").get(get_content_with_filename)),
                    )
                    .push(Router::with_hoop(hoops::limit_rate).push(
                        Router::with_path("thumbnail/{server_name}/{media_id}").get(get_thumbnail),
                    )),
            )
    }
    media
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client.rs | crates/server/src/routing/client.rs | mod account;
mod admin;
mod appservice;
mod auth;
mod device;
mod directory;
mod key;
mod oidc;
mod presence;
mod profile;
mod push_rule;
mod pusher;
mod register;
mod room;
mod room_key;
mod session;
pub mod sync_msc4186;
mod sync_v3;
mod third_party;
mod to_device;
mod unstable;
mod user;
mod user_directory;
mod voip;
pub(crate) mod media;
use std::collections::BTreeMap;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::config;
use crate::core::client::discovery::{
capabilities::{
Capabilities, CapabilitiesResBody, ChangePasswordCapability, RoomVersionStability,
RoomVersionsCapability, SetAvatarUrlCapability, SetDisplayNameCapability,
ThirdPartyIdChangesCapability,
},
versions::VersionsResBody,
};
use crate::core::client::search::{ResultCategories, SearchReqArgs, SearchReqBody, SearchResBody};
use crate::routing::prelude::*;
/// Builds the `/client` router for the Matrix Client-Server API, mounting the
/// same route tree under the `v3`, `v1` and `r0` version prefixes.
pub fn router() -> Router {
    let mut client = Router::with_path("client").oapi_tag("client");
    for v in ["v3", "v1", "r0"] {
        client = client
            // Routes reachable without an access token.
            .push(
                Router::with_path(v)
                    .push(account::public_router())
                    .push(profile::public_router())
                    .push(register::public_router())
                    .push(session::public_router())
                    .push(room::public_router())
                    .push(directory::public_router())
                    .push(media::self_auth_router())
                    .push(
                        Router::with_path("publicRooms")
                            .get(room::get_public_rooms)
                            .post(room::get_filtered_public_rooms),
                    ),
            )
            // Routes that require a valid access token.
            .push(
                Router::with_path(v)
                    .hoop(hoops::auth_by_access_token)
                    .push(account::authed_router())
                    .push(register::authed_router())
                    .push(session::authed_router())
                    .push(device::authed_router())
                    .push(room_key::authed_router())
                    .push(room::authed_router())
                    .push(user::authed_router())
                    .push(directory::authed_router())
                    .push(user_directory::authed_router())
                    .push(key::authed_router())
                    .push(profile::authed_router())
                    .push(voip::authed_router())
                    .push(appservice::authed_router())
                    .push(admin::authed_router())
                    .push(third_party::authed_router())
                    .push(to_device::authed_router())
                    .push(auth::authed_router())
                    .push(pusher::authed_router())
                    .push(push_rule::authed_router())
                    .push(presence::authed_router())
                    .push(Router::with_path("joined_rooms").get(room::membership::joined_rooms))
                    .push(
                        Router::with_path("join/{room_id_or_alias}")
                            .post(room::membership::join_room_by_id_or_alias),
                    )
                    .push(Router::with_path("createRoom").post(room::create_room))
                    .push(Router::with_path("notifications").get(get_notifications))
                    .push(Router::with_path("sync").get(sync_v3::sync_events_v3))
                    .push(
                        Router::with_path("dehydrated_device")
                            .get(device::dehydrated)
                            .put(device::upsert_dehydrated)
                            .delete(device::delete_dehydrated)
                            .push(
                                Router::with_path("{device_id}/events")
                                    .post(to_device::for_dehydrated),
                            ),
                    ),
            )
            // Authenticated routes that are additionally rate-limited.
            .push(
                Router::with_path(v)
                    .hoop(hoops::limit_rate)
                    .hoop(hoops::auth_by_access_token)
                    .push(Router::with_path("search").post(search))
                    .push(Router::with_path("capabilities").get(get_capabilities))
                    .push(Router::with_path("knock/{room_id_or_alias}").post(room::knock_room)),
            )
    }
    // Version-independent routes.
    client
        .push(Router::with_path("versions").get(supported_versions))
        .push(
            Router::with_path("oidc")
                .push(Router::with_path("status").get(oidc::oidc_status))
                .push(Router::with_path("auth").get(oidc::oidc_auth))
                .push(Router::with_path("callback").get(oidc::oidc_callback))
                .push(Router::with_path("login").post(oidc::oidc_login)),
        )
        .push(unstable::router())
}
/// #POST /_matrix/client/r0/search
/// Searches rooms for messages.
///
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
#[endpoint]
fn search(
_aa: AuthArgs,
args: SearchReqArgs,
body: JsonBody<SearchReqBody>,
depot: &mut Depot,
) -> JsonResult<SearchResBody> {
let authed = depot.authed_info()?;
let search_criteria = body.search_categories.room_events.as_ref().unwrap();
let room_events = crate::event::search::search_pdus(
authed.user_id(),
search_criteria,
args.next_batch.as_deref(),
)?;
json_ok(SearchResBody::new(ResultCategories { room_events }))
}
/// #GET /_matrix/client/r0/capabilities
/// Get information on the supported feature set and other relevent capabilities of this server.
#[endpoint]
fn get_capabilities(_aa: AuthArgs) -> JsonResult<CapabilitiesResBody> {
    let conf = crate::config::get();

    // Stability of every advertised room version. Stable entries come second
    // in the chain, so on a duplicate key they overwrite the unstable entry —
    // the same outcome as the original sequential inserts.
    let available: BTreeMap<_, _> = config::UNSTABLE_ROOM_VERSIONS
        .iter()
        .map(|version| (version.clone(), RoomVersionStability::Unstable))
        .chain(
            config::STABLE_ROOM_VERSIONS
                .iter()
                .map(|version| (version.clone(), RoomVersionStability::Stable)),
        )
        .collect();

    json_ok(CapabilitiesResBody {
        capabilities: Capabilities {
            room_versions: RoomVersionsCapability {
                default: conf.default_room_version.clone(),
                available,
            },
            // TODO: use config values
            change_password: ChangePasswordCapability { enabled: true },
            set_display_name: SetDisplayNameCapability { enabled: true },
            set_avatar_url: SetAvatarUrlCapability { enabled: true },
            thirdparty_id_changes: ThirdPartyIdChangesCapability { enabled: true },
            ..Default::default()
        },
    })
}
/// #GET /_matrix/client/versions
/// Get the versions of the specification and unstable features supported by this server.
///
/// - Versions take the form MAJOR.MINOR.PATCH
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
/// - Unstable features are namespaced and may include version information in their name
///
/// Note: Unstable features are used while developing new features. Clients should avoid using
/// unstable features in their stable releases
#[endpoint]
fn supported_versions() -> JsonResult<VersionsResBody> {
json_ok(VersionsResBody {
versions: vec![
"r0.5.0".to_owned(),
"r0.6.0".to_owned(),
"v1.1".to_owned(),
"v1.2".to_owned(),
"v1.3".to_owned(),
"v1.4".to_owned(),
"v1.5".to_owned(),
"v1.6".to_owned(),
"v1.7".to_owned(),
"v1.8".to_owned(),
"v1.9".to_owned(),
"v1.10".to_owned(),
"v1.11".to_owned(),
"v1.12".to_owned(),
],
unstable_features: BTreeMap::from_iter([
("org.matrix.e2e_cross_signing".to_owned(), true),
("org.matrix.msc2285.stable".to_owned(), true), /* private read receipts (https://github.com/matrix-org/matrix-spec-proposals/pull/2285) */
("uk.half-shot.msc2666.query_mutual_rooms".to_owned(), true), /* query mutual rooms (https://github.com/matrix-org/matrix-spec-proposals/pull/2666) */
("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */
("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */
("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */
("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */
("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */
("org.matrix.msc3575".to_owned(), true), /* sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/3575/files#r1588877046) */
("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */
("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */
("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
]),
})
}
/// `GET .../notifications` — stub; authenticates the caller but returns an
/// empty body until notification listing is implemented.
#[endpoint]
fn get_notifications(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // TODO: get_notifications
    let _authed = depot.authed_info()?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation.rs | crates/server/src/routing/federation.rs | //! (De)serializable types for the [Matrix Server-Server API][federation-api].
//! These types are used by server code.
//!
//! [federation-api]: https://spec.matrix.org/latest/server-server-api/
mod backfill;
mod event;
pub(super) mod key;
mod media;
mod membership;
mod openid;
mod query;
mod room;
mod space;
mod threepid;
mod transaction;
mod user;
use salvo::prelude::*;
use crate::core::directory::Server;
use crate::core::federation::directory::ServerVersionResBody;
use crate::{
AppError, AppResult, AuthArgs, EmptyResult, JsonResult, config, empty_ok, hoops, json_ok,
};
/// Builds the `/federation` router for the Matrix Server-Server API.
///
/// Every route is gated on federation being enabled in the config and on the
/// request carrying either an access token or valid federation signatures.
pub fn router() -> Router {
    Router::with_path("federation")
        .hoop(check_federation_enabled)
        .hoop(hoops::auth_by_access_token_or_signatures)
        .oapi_tag("federation")
        .push(
            Router::with_path("v2")
                .push(backfill::router())
                .push(event::router())
                .push(membership::router_v2())
                .push(openid::router())
                .push(query::router())
                .push(room::router())
                .push(space::router())
                .push(threepid::router())
                .push(transaction::router())
                .push(user::router())
                // Spec: `GET /_matrix/federation/v1/version` — version info is
                // fetched with GET; the previous `.post(...)` registration made
                // the endpoint unreachable for spec-compliant peers.
                .push(Router::with_path("version").get(version)),
        )
        .push(
            Router::with_path("v1")
                .push(backfill::router())
                .push(event::router())
                .push(membership::router_v1())
                .push(openid::router())
                .push(query::router())
                .push(room::router())
                .push(space::router())
                .push(threepid::router())
                .push(transaction::router())
                .push(user::router())
                .push(media::router())
                .push(Router::with_path("version").get(version)),
        )
        .push(Router::with_path("versions").get(get_versions))
}
/// Middleware that rejects the request when federation is disabled in the
/// server configuration.
#[handler]
async fn check_federation_enabled() -> AppResult<()> {
    match config::get().enabled_federation() {
        Some(_) => Ok(()),
        None => Err(AppError::public("Federation is disabled.")),
    }
}
/// `GET .../federation/versions` — stub; returns an empty body until MSC3723
/// support is implemented.
#[endpoint]
async fn get_versions(_aa: AuthArgs) -> EmptyResult {
    //TODO: https://github.com/matrix-org/matrix-spec-proposals/pull/3723
    empty_ok()
}
/// #GET /_matrix/federation/v1/version
/// Get version information on this server.
///
/// Reports the implementation name and the crate version baked in at compile
/// time.
#[endpoint]
async fn version() -> JsonResult<ServerVersionResBody> {
    let server = Server {
        name: Some("Palpo".to_owned()),
        version: Some(env!("CARGO_PKG_VERSION").to_owned()),
    };
    json_ok(ServerVersionResBody {
        server: Some(server),
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin.rs | crates/server/src/routing/admin.rs | mod event;
mod federation;
mod media;
mod register;
mod room;
mod scheduled_task;
mod server_notice;
mod statistic;
mod user;
mod user_admin;
mod user_lookup;
use salvo::prelude::*;
use crate::routing::prelude::*;
/// Middleware to require admin privileges.
///
/// Fails with `M_FORBIDDEN` when the authenticated user is not an admin, and
/// propagates the auth error when no authenticated user is present at all.
#[handler]
pub async fn require_admin(depot: &mut Depot) -> AppResult<()> {
    let authed = depot.authed_info()?;
    if !authed.is_admin() {
        return Err(MatrixError::forbidden("Requires admin privileges", None).into());
    }
    Ok(())
}
/// Builds the admin router, exposing the same route tree under both the
/// native (`_palpo/admin`) and Synapse-compatible (`_synapse/admin`) prefixes.
/// Every route is gated by access-token auth plus the admin check.
pub fn router() -> Router {
    ["_palpo/admin", "_synapse/admin"]
        .into_iter()
        .fold(Router::new().oapi_tag("admin"), |admin, prefix| {
            admin.push(
                Router::with_path(prefix)
                    .hoop(crate::hoops::auth_by_access_token)
                    .hoop(require_admin)
                    .get(home)
                    .push(event::router())
                    .push(federation::router())
                    .push(media::router())
                    .push(register::router())
                    .push(room::router())
                    .push(scheduled_task::router())
                    .push(server_notice::router())
                    .push(statistic::router())
                    .push(user::router())
                    .push(user_admin::router())
                    .push(user_lookup::router()),
            )
        })
}
/// Landing page served at the admin API root.
#[handler]
async fn home() -> &'static str {
    "Palpo Admin API"
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/appservice/third_party.rs | crates/server/src/routing/appservice/third_party.rs | use salvo::prelude::*;
use crate::core::third_party::*;
use crate::{AuthArgs, JsonResult, json_ok};
/// Builds the appservice `thirdparty` router: protocol metadata plus location
/// and user lookups, each with an optional `{protocol}` scope.
///
/// NOTE(review): the `protocols` handler defined below is never registered
/// here — confirm whether a `protocols` list route was intended.
pub fn router() -> Router {
    Router::with_path("thirdparty")
        .push(Router::with_path("protocol/{protocol}").get(protocol))
        .push(
            Router::with_path("location")
                .get(locations)
                .push(Router::with_path("{protocol}").get(protocol_locations)),
        )
        .push(
            Router::with_path("user")
                .get(users)
                .push(Router::with_path("{protocol}").get(protocol_users)),
        )
}
/// Stub: returns an empty protocols map until third-party protocol support lands.
#[endpoint]
async fn protocols(_aa: AuthArgs) -> JsonResult<ProtocolsResBody> {
    // TODO: LATER
    json_ok(ProtocolsResBody::default())
}
/// Stub: always answers `null` for a single-protocol lookup.
#[endpoint]
async fn protocol(_aa: AuthArgs) -> JsonResult<Option<ProtocolResBody>> {
    // TODO: LATER
    json_ok(None)
}
/// Stub: returns an empty third-party location list.
#[endpoint]
async fn locations(_aa: AuthArgs) -> JsonResult<LocationsResBody> {
    // TODO: LATER
    json_ok(LocationsResBody::default())
}
/// Stub: returns an empty location list for a specific protocol.
#[endpoint]
async fn protocol_locations(_aa: AuthArgs) -> JsonResult<LocationsResBody> {
    // TODO: LATER
    json_ok(LocationsResBody::default())
}
/// Stub: returns an empty third-party user list.
#[endpoint]
async fn users(_aa: AuthArgs, _req: &mut Request) -> JsonResult<UsersResBody> {
    // TODO: LATER
    json_ok(UsersResBody::default())
}
/// Stub: returns an empty user list for a specific protocol.
#[endpoint]
async fn protocol_users(_aa: AuthArgs, _req: &mut Request) -> JsonResult<UsersResBody> {
    // TODO: LATER
    json_ok(UsersResBody::default())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/appservice/transaction.rs | crates/server/src/routing/appservice/transaction.rs | use salvo::prelude::*;
use crate::AuthArgs;
use crate::{EmptyResult, empty_ok};
/// `PUT /_matrix/app/v1/transactions/{txn_id}` — event pushes from the homeserver.
pub fn router() -> Router {
    Router::with_path("transactions/{txn_id}").put(send_event)
}
/// Stub: acknowledges every transaction with an empty 200; event delivery is
/// not processed yet.
#[endpoint]
async fn send_event(_aa: AuthArgs) -> EmptyResult {
    // TODO: todo
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/user.rs | crates/server/src/routing/admin/user.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde::Deserialize;
use crate::core::client::device::Device;
use crate::core::identifiers::*;
use crate::{EmptyResult, JsonResult, MatrixError, data, empty_ok, json_ok};
/// Body of the admin "update device" request.
#[derive(Debug, Deserialize, ToSchema)]
pub struct UpdateDeviceReqBody {
    /// New human-readable device name.
    // NOTE(review): `put_device` wraps this in `Some(...)` unconditionally, so
    // omitting the field clears the name rather than leaving it unchanged —
    // confirm that is the intended semantics.
    #[serde(default)]
    pub display_name: Option<String>,
}
/// Admin v2 device routes: GET/PUT/DELETE on a single device of a user.
pub fn router() -> Router {
    Router::with_path("v2").push(
        Router::with_path("users/{user_id}/devices/{device_id}")
            .get(get_device)
            .put(put_device)
            .delete(delete_device),
    )
}
/// Admin lookup of a single device belonging to `user_id`; 404s when the
/// device record cannot be loaded.
#[handler]
pub fn get_device(
    user_id: PathParam<OwnedUserId>,
    device_id: PathParam<OwnedDeviceId>,
) -> JsonResult<Device> {
    match data::user::device::get_device(&user_id, &device_id) {
        Ok(device) => json_ok(device.into_matrix_device()),
        Err(_) => Err(MatrixError::not_found("device is not found.").into()),
    }
}
/// Admin update of a device's metadata (currently only the display name);
/// 404s when the device does not exist.
#[handler]
pub fn put_device(
    user_id: PathParam<OwnedUserId>,
    device_id: PathParam<OwnedDeviceId>,
    body: JsonBody<UpdateDeviceReqBody>,
) -> JsonResult<Device> {
    let body = body.into_inner();
    let update = data::user::device::DeviceUpdate {
        // NOTE(review): the outer `Some` means the display name is always
        // written — a request omitting `display_name` clears it. Confirm this
        // is intended rather than "leave unchanged".
        display_name: Some(body.display_name),
        user_agent: None,
        last_seen_ip: None,
        last_seen_at: None,
    };
    let Ok(device) = data::user::device::update_device(&user_id, &device_id, update) else {
        return Err(MatrixError::not_found("device is not found.").into());
    };
    json_ok(device.into_matrix_device())
}
/// Admin removal of a user's device; errors from the data layer propagate to
/// the caller.
#[handler]
pub fn delete_device(
    user_id: PathParam<OwnedUserId>,
    device_id: PathParam<OwnedDeviceId>,
) -> EmptyResult {
    data::user::device::remove_device(&user_id, &device_id)?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/event.rs | crates/server/src/routing/admin/event.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
};
/// Admin event routes — currently an empty placeholder router (the file's
/// imports anticipate event/auth-chain endpoints that are not wired up yet).
pub fn router() -> Router {
    Router::new()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/register.rs | crates/server/src/routing/admin/register.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde::Serialize;
use crate::core::UnixMillis;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
user,
};
/// Admin registration helpers; currently only the username-availability check.
pub fn router() -> Router {
    Router::new().push(Router::with_path("username_available").get(check_username_available))
}
/// Response body for the `username_available` check.
#[derive(Serialize, ToSchema, Debug, Clone)]
struct AvailableResBody {
    // Always `true` when returned; an unavailable name yields an error response instead.
    available: bool,
}
/// An admin API to check if a given username is available, regardless of whether registration is enabled.
///
/// Responds `{"available": true}` for a free name and `M_USER_IN_USE` otherwise.
#[endpoint]
fn check_username_available(
    _aa: AuthArgs,
    username: QueryParam<String, true>,
) -> JsonResult<AvailableResBody> {
    if user::is_username_available(&username)? {
        json_ok(AvailableResBody { available: true })
    } else {
        Err(MatrixError::user_in_use("desired user id is invalid or already taken").into())
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/user_lookup.rs | crates/server/src/routing/admin/user_lookup.rs | //! User lookup endpoints for MAS integration
//!
//! - GET /_synapse/admin/v1/auth_providers/{provider}/users/{external_id}
//! - GET /_synapse/admin/v1/threepid/{medium}/users/{address}
use salvo::oapi::extract::PathParam;
use salvo::prelude::*;
use serde::Serialize;
use crate::routing::prelude::*;
/// Response for user lookup endpoints
#[derive(Debug, Serialize, ToSchema)]
pub struct UserIdResponse {
    /// Fully-qualified Matrix user ID, rendered as a string.
    pub user_id: String,
}
/// GET /_synapse/admin/v1/auth_providers/{provider}/users/{external_id}
///
/// Find a user based on an external ID from an auth provider (SSO/OIDC).
/// Fails with `M_NOT_FOUND` when no mapping exists.
#[endpoint]
pub async fn get_user_by_external_id(
    provider: PathParam<String>,
    external_id: PathParam<String>,
) -> JsonResult<UserIdResponse> {
    let provider = provider.into_inner();
    let external_id = external_id.into_inner();
    let user_id = crate::data::user::get_user_by_external_id(&provider, &external_id)?
        .ok_or_else(|| MatrixError::not_found("User not found"))?;
    json_ok(UserIdResponse {
        user_id: user_id.to_string(),
    })
}
/// GET /_synapse/admin/v1/threepid/{medium}/users/{address}
///
/// Find a user based on 3PID (email, phone, etc.). Fails with `M_NOT_FOUND`
/// when no binding exists.
#[endpoint]
pub async fn get_user_by_threepid(
    medium: PathParam<String>,
    address: PathParam<String>,
) -> JsonResult<UserIdResponse> {
    let medium = medium.into_inner();
    let address = address.into_inner();
    let user_id = crate::data::user::get_user_by_threepid(&medium, &address)?
        .ok_or_else(|| MatrixError::not_found("User not found"))?;
    json_ok(UserIdResponse {
        user_id: user_id.to_string(),
    })
}
/// MAS-integration lookup routes: resolve a user by SSO external ID or 3PID.
pub fn router() -> Router {
    Router::new()
        .push(
            Router::with_path("v1/auth_providers/{provider}/users/{external_id}")
                .get(get_user_by_external_id),
        )
        .push(Router::with_path("v1/threepid/{medium}/users/{address}").get(get_user_by_threepid))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/user_admin.rs | crates/server/src/routing/admin/user_admin.rs | //! Synapse Admin API - User Management
//!
//! Phase 1 (MAS Critical):
//! - GET/PUT /_synapse/admin/v2/users/{user_id}
//! - GET /_synapse/admin/v2/users
//! - GET /_synapse/admin/v3/users
//! - POST /_synapse/admin/v1/users/{user_id}/_allow_cross_signing_replacement_without_uia
//!
//! Phase 2 (User Management):
//! - POST /_synapse/admin/v1/deactivate/{user_id}
//! - POST /_synapse/admin/v1/reset_password/{user_id}
//! - GET/PUT /_synapse/admin/v1/users/{user_id}/admin
//! - POST/DELETE /_synapse/admin/v1/users/{user_id}/shadow_ban
use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde::{Deserialize, Serialize};
use crate::core::identifiers::*;
use crate::{EmptyResult, JsonResult, MatrixError, data, empty_ok, json_ok, user};
// ============================================================================
// Response/Request Types
// ============================================================================
/// User info for admin API v2
#[derive(Debug, Serialize, ToSchema)]
pub struct UserInfoV2 {
    // Fully-qualified Matrix user ID.
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub displayname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub threepids: Option<Vec<ThreepidInfo>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub avatar_url: Option<String>,
    pub is_guest: bool,
    pub admin: bool,
    pub deactivated: bool,
    pub shadow_banned: bool,
    pub locked: bool,
    // Account creation timestamp (milliseconds, per `created_at.get()` in `build_user_info`).
    pub creation_ts: i64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub appservice_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub consent_version: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub consent_ts: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub consent_server_notice_sent: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_type: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub external_ids: Option<Vec<ExternalIdInfo>>,
}
/// A third-party identifier (email, msisdn, …) bound to an account.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ThreepidInfo {
    pub medium: String,
    pub address: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub added_at: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub validated_at: Option<i64>,
}
/// An SSO/OIDC external-ID binding for an account.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ExternalIdInfo {
    pub auth_provider: String,
    pub external_id: String,
}
/// Request body for PUT /v2/users/{user_id}
///
/// All fields are optional patches; `#[serde(default)]` lets clients send a
/// partial body.
#[derive(Debug, Deserialize, ToSchema)]
pub struct PutUserReqBody {
    #[serde(default)]
    pub password: Option<String>,
    // Whether setting a new password should invalidate existing sessions.
    #[serde(default)]
    pub logout_devices: Option<bool>,
    #[serde(default)]
    pub displayname: Option<String>,
    #[serde(default)]
    pub avatar_url: Option<String>,
    #[serde(default)]
    pub threepids: Option<Vec<ThreepidInfo>>,
    #[serde(default)]
    pub external_ids: Option<Vec<ExternalIdInfo>>,
    #[serde(default)]
    pub admin: Option<bool>,
    #[serde(default)]
    pub deactivated: Option<bool>,
    #[serde(default)]
    pub locked: Option<bool>,
    #[serde(default)]
    pub user_type: Option<String>,
}
/// Response for user list
#[derive(Debug, Serialize, ToSchema)]
pub struct UsersListResponse {
    pub users: Vec<UserInfoV2>,
    // Pagination cursor: offset of the next page (see `build_users_list`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
    pub total: i64,
}
/// Request for deactivate
#[derive(Debug, Deserialize, ToSchema)]
pub struct DeactivateReqBody {
    #[serde(default)]
    pub erase: Option<bool>,
}
/// Request for reset password
#[derive(Debug, Deserialize, ToSchema)]
pub struct ResetPasswordReqBody {
    pub new_password: String,
    #[serde(default)]
    pub logout_devices: Option<bool>,
}
/// Request for suspend
#[derive(Debug, Deserialize, ToSchema)]
pub struct SuspendReqBody {
    pub suspend: bool,
}
/// Response for suspend
#[derive(Debug, Serialize, ToSchema)]
pub struct SuspendResponse {
    pub user_id: String,
    pub suspended: bool,
}
/// Response for admin status
#[derive(Debug, Serialize, ToSchema)]
pub struct AdminStatusResponse {
    pub admin: bool,
}
/// Request for admin status
#[derive(Debug, Deserialize, ToSchema)]
pub struct AdminStatusReqBody {
    pub admin: bool,
}
/// Response for cross signing
#[derive(Debug, Serialize, ToSchema)]
pub struct CrossSigningResponse {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub updatable_without_uia_before_ms: Option<i64>,
}
// ============================================================================
// Phase 2 Types
// ============================================================================
/// Response for whois
#[derive(Debug, Serialize, ToSchema)]
pub struct WhoisResponse {
    pub user_id: String,
    // Map of device_id -> session/connection details.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub devices: Option<std::collections::HashMap<String, WhoisDeviceInfo>>,
}
/// Per-device session list for whois.
#[derive(Debug, Serialize, ToSchema)]
pub struct WhoisDeviceInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sessions: Option<Vec<WhoisSessionInfo>>,
}
/// Per-session connection list for whois.
#[derive(Debug, Serialize, ToSchema)]
pub struct WhoisSessionInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub connections: Option<Vec<WhoisConnectionInfo>>,
}
/// A single observed connection (IP, last-seen, user agent).
#[derive(Debug, Serialize, ToSchema)]
pub struct WhoisConnectionInfo {
    pub ip: Option<String>,
    pub last_seen: Option<i64>,
    pub user_agent: Option<String>,
}
/// Response for joined_rooms
#[derive(Debug, Serialize, ToSchema)]
pub struct JoinedRoomsResponse {
    pub joined_rooms: Vec<String>,
    pub total: i64,
}
/// Response for pushers
#[derive(Debug, Serialize, ToSchema)]
pub struct PushersResponse {
    pub pushers: Vec<serde_json::Value>,
    pub total: i64,
}
/// Response for account data
#[derive(Debug, Serialize, ToSchema)]
pub struct AccountDataResponse {
    pub account_data: AccountDataContent,
}
/// Global plus per-room account-data maps.
#[derive(Debug, Serialize, ToSchema)]
pub struct AccountDataContent {
    pub global: std::collections::HashMap<String, serde_json::Value>,
    pub rooms:
        std::collections::HashMap<String, std::collections::HashMap<String, serde_json::Value>>,
}
/// Request for ratelimit
#[derive(Debug, Deserialize, ToSchema)]
pub struct RateLimitReqBody {
    #[serde(default)]
    pub messages_per_second: Option<i32>,
    #[serde(default)]
    pub burst_count: Option<i32>,
}
/// Response for ratelimit
#[derive(Debug, Serialize, ToSchema)]
pub struct RateLimitResponse {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub messages_per_second: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_count: Option<i32>,
}
// ============================================================================
// Helper functions
// ============================================================================
/// Assembles the Synapse-admin-style `UserInfoV2` view for `user_id`.
///
/// Fails with `M_NOT_FOUND` when the user record does not exist. Profile,
/// threepid and external-id lookups are best-effort — `.ok()` silently drops
/// data-layer errors and the corresponding fields are omitted from the view.
fn build_user_info(user_id: &UserId) -> crate::AppResult<UserInfoV2> {
    let db_user =
        data::user::get_user(user_id).map_err(|_| MatrixError::not_found("User not found"))?;
    let display_name = data::user::display_name(user_id).ok().flatten();
    let avatar_url = data::user::avatar_url(user_id).ok().flatten();
    let threepids = data::user::get_threepids(user_id).ok().map(|tps| {
        tps.into_iter()
            .map(|tp| ThreepidInfo {
                medium: tp.medium,
                address: tp.address,
                added_at: Some(tp.added_at.get() as i64),
                validated_at: Some(tp.validated_at.get() as i64),
            })
            .collect()
    });
    let external_ids = data::user::get_external_ids_by_user(user_id)
        .ok()
        .map(|eids| {
            eids.into_iter()
                .map(|eid| ExternalIdInfo {
                    auth_provider: eid.auth_provider,
                    external_id: eid.external_id,
                })
                .collect()
        });
    Ok(UserInfoV2 {
        name: user_id.to_string(),
        displayname: display_name,
        threepids,
        avatar_url: avatar_url.map(|u| u.to_string()),
        is_guest: db_user.is_guest,
        admin: db_user.is_admin,
        // Deactivated/locked are derived from the presence of their timestamps.
        deactivated: db_user.deactivated_at.is_some(),
        shadow_banned: db_user.shadow_banned,
        locked: db_user.locked_at.is_some(),
        creation_ts: db_user.created_at.get() as i64,
        appservice_id: db_user.appservice_id,
        consent_version: db_user.consent_version,
        consent_ts: db_user.consent_at.map(|t| t.get() as i64),
        consent_server_notice_sent: db_user.consent_server_notice_sent,
        user_type: db_user.ty,
        external_ids,
    })
}
/// Builds a paginated admin user listing for the given filter.
///
/// Threepids and external IDs are intentionally omitted from list rows for
/// performance; `next_token` is the absolute offset of the next page and is
/// only emitted when this page was full (i.e. more rows may exist).
fn build_users_list(filter: &data::user::ListUsersFilter) -> crate::AppResult<UsersListResponse> {
    let (users, total) = data::user::list_users(filter)?;
    let limit = filter.limit.unwrap_or(100) as usize;
    let from = filter.from.unwrap_or(0) as usize;
    let user_infos: Vec<UserInfoV2> = users
        .into_iter()
        .map(|db_user| {
            let uid = &db_user.id;
            let display_name = data::user::display_name(uid).ok().flatten();
            let avatar_url = data::user::avatar_url(uid).ok().flatten();
            UserInfoV2 {
                name: uid.to_string(),
                displayname: display_name,
                threepids: None, // Not included in list response for performance
                avatar_url: avatar_url.map(|u| u.to_string()),
                is_guest: db_user.is_guest,
                admin: db_user.is_admin,
                deactivated: db_user.deactivated_at.is_some(),
                shadow_banned: db_user.shadow_banned,
                locked: db_user.locked_at.is_some(),
                creation_ts: db_user.created_at.get() as i64,
                appservice_id: db_user.appservice_id,
                consent_version: db_user.consent_version,
                consent_ts: db_user.consent_at.map(|t| t.get() as i64),
                consent_server_notice_sent: db_user.consent_server_notice_sent,
                user_type: db_user.ty,
                external_ids: None,
            }
        })
        .collect();
    // A full page implies there may be more rows; advertise the next offset.
    let next_token = if user_infos.len() >= limit {
        Some((from + user_infos.len()).to_string())
    } else {
        None
    };
    Ok(UsersListResponse {
        users: user_infos,
        next_token,
        total,
    })
}
// ============================================================================
// Phase 1: MAS Critical Endpoints
// ============================================================================
/// GET /_synapse/admin/v2/users/{user_id}
///
/// Get details of a single user; 404 when the user does not exist.
#[endpoint]
pub async fn get_user_v2(user_id: PathParam<OwnedUserId>) -> JsonResult<UserInfoV2> {
    let info = build_user_info(&user_id.into_inner())?;
    json_ok(info)
}
/// PUT /_synapse/admin/v2/users/{user_id}
///
/// Create or modify a user
#[endpoint]
pub async fn put_user_v2(
user_id: PathParam<OwnedUserId>,
body: JsonBody<PutUserReqBody>,
) -> JsonResult<UserInfoV2> {
let user_id = user_id.into_inner();
let body = body.into_inner();
// Check if user exists
let user_exists = data::user::user_exists(&user_id).unwrap_or(false);
if !user_exists {
// Create new user
user::create_user(user_id.clone(), body.password.as_deref())?;
} else {
// Update password if provided
if let Some(password) = &body.password {
crate::user::set_password(&user_id, password)?;
// Logout devices if requested
if body.logout_devices.unwrap_or(true) {
data::user::remove_all_devices(&user_id)?;
}
}
}
// Update display name
if let Some(display_name) = &body.displayname {
data::user::set_display_name(&user_id, display_name)?;
}
// Update threepids
if let Some(threepids) = body.threepids {
let entries: Vec<(String, String, Option<i64>, Option<i64>)> = threepids
.into_iter()
.map(|tp| (tp.medium, tp.address, tp.added_at, tp.validated_at))
.collect();
data::user::replace_threepids(&user_id, &entries)?;
}
// Update avatar
if let Some(avatar_url) = &body.avatar_url {
if let Ok(mxc_uri) = <&MxcUri>::try_from(avatar_url.as_str()) {
data::user::set_avatar_url(&user_id, mxc_uri)?;
}
}
// Update admin status
if let Some(admin) = body.admin {
data::user::set_admin(&user_id, admin)?;
}
// Update deactivated status
if let Some(deactivated) = body.deactivated {
if deactivated {
data::user::deactivate(&user_id)?;
}
}
// Update locked status
if let Some(locked) = body.locked {
data::user::set_locked(&user_id, locked, None)?;
}
// Update user type
if let Some(user_type) = body.user_type {
data::user::set_user_type(&user_id, Some(user_type.as_str()))?;
}
// Update external IDs if provided
if let Some(external_ids) = body.external_ids {
let ids: Vec<(String, String)> = external_ids
.into_iter()
.map(|eid| (eid.auth_provider, eid.external_id))
.collect();
data::user::replace_external_ids(&user_id, &ids)?;
}
// Return updated user info
json_ok(build_user_info(&user_id)?)
}
/// GET /_synapse/admin/v2/users
///
/// List all users with pagination and filtering.
#[endpoint]
pub async fn list_users_v2(
    from: QueryParam<i64, false>,
    limit: QueryParam<i64, false>,
    user_id: QueryParam<String, false>,
    name: QueryParam<String, false>,
    guests: QueryParam<bool, false>,
    deactivated: QueryParam<bool, false>,
    admins: QueryParam<bool, false>,
    order_by: QueryParam<String, false>,
    dir: QueryParam<String, false>,
) -> JsonResult<UsersListResponse> {
    // `name` takes precedence over `user_id` when both are supplied.
    let filter = data::user::ListUsersFilter {
        from: from.into_inner(),
        limit: limit.into_inner(),
        name: name.into_inner().or(user_id.into_inner()),
        guests: guests.into_inner(),
        deactivated: deactivated.into_inner(),
        admins: admins.into_inner(),
        user_types: None,
        order_by: order_by.into_inner(),
        dir: dir.into_inner(),
    };
    json_ok(build_users_list(&filter)?)
}
/// GET /_synapse/admin/v3/users
///
/// Same as v2 but with different deactivated parameter handling
#[endpoint]
pub async fn list_users_v3(
    from: QueryParam<i64, false>,
    limit: QueryParam<i64, false>,
    user_id: QueryParam<String, false>,
    name: QueryParam<String, false>,
    guests: QueryParam<bool, false>,
    deactivated: QueryParam<bool, false>,
    admins: QueryParam<bool, false>,
    order_by: QueryParam<String, false>,
    dir: QueryParam<String, false>,
) -> JsonResult<UsersListResponse> {
    // v3 uses deactivated=true/false differently
    // In v2, deactivated=true means show only deactivated users
    // In v3, not_deactivated parameter is used instead
    // NOTE(review): the `not_deactivated` handling described above is NOT
    // implemented — this endpoint currently behaves identically to v2.
    // A `not_deactivated` query parameter would need to be added here and
    // mapped onto the filter; confirm the intended v3 semantics first.
    let name_filter = name.into_inner().or(user_id.into_inner());
    let filter = data::user::ListUsersFilter {
        from: from.into_inner(),
        limit: limit.into_inner(),
        name: name_filter,
        guests: guests.into_inner(),
        deactivated: deactivated.into_inner(),
        admins: admins.into_inner(),
        user_types: None,
        order_by: order_by.into_inner(),
        dir: dir.into_inner(),
    };
    json_ok(build_users_list(&filter)?)
}
/// POST /_synapse/admin/v1/users/{user_id}/_allow_cross_signing_replacement_without_uia
///
/// Allow a user to replace cross-signing keys without UIA.
/// The replacement window lasts 10 minutes.
#[endpoint]
pub async fn allow_cross_signing_replacement(
    user_id: PathParam<OwnedUserId>,
) -> JsonResult<CrossSigningResponse> {
    /// Grace period during which the keys may be replaced without UIA.
    const WINDOW_MS: i64 = 10 * 60 * 1000;
    let user_id = user_id.into_inner();
    // The user must exist and already own a master cross-signing key.
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    if !data::user::key::has_master_cross_signing_key(&user_id)? {
        return Err(MatrixError::not_found("User has no master cross-signing key").into());
    }
    let expires_ts = crate::core::UnixMillis::now().get() as i64 + WINDOW_MS;
    data::user::key::set_cross_signing_replacement_allowed(&user_id, expires_ts)?;
    json_ok(CrossSigningResponse {
        updatable_without_uia_before_ms: Some(expires_ts),
    })
}
// ============================================================================
// Phase 2: User Management Endpoints
// ============================================================================
/// POST /_synapse/admin/v1/deactivate/{user_id}
///
/// Deactivate a user account; with `erase: true` the user's media is also deleted.
#[endpoint]
pub async fn deactivate_user(
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<DeactivateReqBody>,
) -> EmptyResult {
    let user_id = user_id.into_inner();
    let erase = body.into_inner().erase.unwrap_or(false);
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    // Capture memberships first; full deactivation needs them to leave rooms.
    let joined_rooms = data::user::joined_rooms(&user_id)?;
    user::full_user_deactivate(&user_id, &joined_rooms).await?;
    if erase {
        // Erase user data: delete all media the user uploaded.
        user::delete_all_media(&user_id).await?;
    }
    empty_ok()
}
/// POST /_synapse/admin/v1/reset_password/{user_id}
///
/// Reset a user's password, optionally keeping existing sessions alive.
#[endpoint]
pub async fn reset_password(
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<ResetPasswordReqBody>,
) -> EmptyResult {
    let user_id = user_id.into_inner();
    let body = body.into_inner();
    // Synapse defaults to logging out every device after a password reset.
    let logout_devices = body.logout_devices.unwrap_or(true);
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    // Hash and store the new password.
    crate::user::set_password(&user_id, &body.new_password)?;
    if logout_devices {
        data::user::remove_all_devices(&user_id)?;
    }
    empty_ok()
}
/// GET /_synapse/admin/v1/users/{user_id}/admin
///
/// Get admin status of a user; 404 when the user does not exist.
#[endpoint]
pub async fn get_admin_status(user_id: PathParam<OwnedUserId>) -> JsonResult<AdminStatusResponse> {
    let user_id = user_id.into_inner();
    // FIX: check existence explicitly (consistent with the sibling handlers)
    // so that a genuine database error from `is_admin` propagates instead of
    // being masked as a 404 "User not found".
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    let is_admin = data::user::is_admin(&user_id)?;
    json_ok(AdminStatusResponse { admin: is_admin })
}
/// PUT /_synapse/admin/v1/users/{user_id}/admin
///
/// Set admin status of a user.
#[endpoint]
pub async fn set_admin_status(
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<AdminStatusReqBody>,
) -> EmptyResult {
    let user_id = user_id.into_inner();
    let admin = body.into_inner().admin;
    match data::user::user_exists(&user_id)? {
        false => Err(MatrixError::not_found("User not found").into()),
        true => {
            data::user::set_admin(&user_id, admin)?;
            empty_ok()
        }
    }
}
/// POST /_synapse/admin/v1/users/{user_id}/shadow_ban
///
/// Shadow ban a user.
#[endpoint]
pub async fn shadow_ban_user(user_id: PathParam<OwnedUserId>) -> EmptyResult {
    let user_id = user_id.into_inner();
    match data::user::user_exists(&user_id)? {
        false => Err(MatrixError::not_found("User not found").into()),
        true => {
            data::user::set_shadow_banned(&user_id, true)?;
            empty_ok()
        }
    }
}
/// DELETE /_synapse/admin/v1/users/{user_id}/shadow_ban
///
/// Remove shadow ban from a user.
#[endpoint]
pub async fn unshadow_ban_user(user_id: PathParam<OwnedUserId>) -> EmptyResult {
    let user_id = user_id.into_inner();
    match data::user::user_exists(&user_id)? {
        false => Err(MatrixError::not_found("User not found").into()),
        true => {
            data::user::set_shadow_banned(&user_id, false)?;
            empty_ok()
        }
    }
}
/// PUT /_synapse/admin/v1/suspend/{user_id}
///
/// Suspend or unsuspend a user; echoes the new state back.
#[endpoint]
pub async fn suspend_user(
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<SuspendReqBody>,
) -> JsonResult<SuspendResponse> {
    let user_id = user_id.into_inner();
    let suspend = body.into_inner().suspend;
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    data::user::set_suspended(&user_id, suspend)?;
    json_ok(SuspendResponse {
        user_id: user_id.to_string(),
        suspended: suspend,
    })
}
// ============================================================================
// Phase 2: Extended User Management
// ============================================================================
/// GET /_synapse/admin/v1/whois/{user_id}
///
/// Get information about a user's sessions: one session with one connection
/// entry (ip / last_seen / user_agent) per known device.
#[endpoint]
pub async fn whois_user(user_id: PathParam<OwnedUserId>) -> JsonResult<WhoisResponse> {
    let user_id = user_id.into_inner();
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    let device_map: std::collections::HashMap<_, _> = data::user::device::get_devices(&user_id)?
        .into_iter()
        .map(|device| {
            let connection = WhoisConnectionInfo {
                ip: device.last_seen_ip,
                last_seen: device.last_seen_at.map(|t| t.get() as i64),
                user_agent: device.user_agent,
            };
            (
                device.device_id.to_string(),
                WhoisDeviceInfo {
                    sessions: Some(vec![WhoisSessionInfo {
                        connections: Some(vec![connection]),
                    }]),
                },
            )
        })
        .collect();
    // Omit the `devices` key entirely when the user has no devices.
    json_ok(WhoisResponse {
        user_id: user_id.to_string(),
        devices: (!device_map.is_empty()).then_some(device_map),
    })
}
/// GET /_synapse/admin/v1/users/{user_id}/joined_rooms
///
/// Get the list of rooms a user has joined, with a total count.
#[endpoint]
pub async fn user_joined_rooms(user_id: PathParam<OwnedUserId>) -> JsonResult<JoinedRoomsResponse> {
    let user_id = user_id.into_inner();
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    let joined: Vec<String> = data::user::joined_rooms(&user_id)?
        .iter()
        .map(|r| r.to_string())
        .collect();
    json_ok(JoinedRoomsResponse {
        total: joined.len() as i64,
        joined_rooms: joined,
    })
}
/// GET /_synapse/admin/v1/users/{user_id}/pushers
///
/// Get all pushers for a user.
#[endpoint]
pub async fn user_pushers(user_id: PathParam<OwnedUserId>) -> JsonResult<PushersResponse> {
    let user_id = user_id.into_inner();
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    let pushers = data::user::pusher::get_pushers(&user_id)?;
    let total = pushers.len() as i64;
    // Serialize a fixed subset of pusher fields into ad-hoc JSON objects.
    // NOTE(review): Synapse also exposes a `data` object per pusher — confirm
    // whether it should be included here.
    let pusher_list = pushers
        .into_iter()
        .map(|p| {
            serde_json::json!({
                "app_display_name": p.app_display_name,
                "app_id": p.app_id,
                "device_display_name": p.device_display_name,
                "kind": p.kind,
                "lang": p.lang,
                "pushkey": p.pushkey,
            })
        })
        .collect::<Vec<serde_json::Value>>();
    json_ok(PushersResponse {
        pushers: pusher_list,
        total,
    })
}
/// GET /_synapse/admin/v1/users/{user_id}/accountdata
///
/// Get all account data (global and per-room) for a user.
#[endpoint]
pub async fn user_account_data(user_id: PathParam<OwnedUserId>) -> JsonResult<AccountDataResponse> {
    let user_id = user_id.into_inner();
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    let account_data = AccountDataContent {
        global: data::user::get_global_account_data(&user_id)?,
        rooms: data::user::get_room_account_data(&user_id)?,
    };
    json_ok(AccountDataResponse { account_data })
}
/// GET /_synapse/admin/v1/users/{user_id}/override_ratelimit
///
/// Get the ratelimit override for a user; both fields are null when no
/// override is set.
#[endpoint]
pub async fn get_user_ratelimit(user_id: PathParam<OwnedUserId>) -> JsonResult<RateLimitResponse> {
    let user_id = user_id.into_inner();
    if !data::user::user_exists(&user_id)? {
        return Err(MatrixError::not_found("User not found").into());
    }
    let response = match data::user::get_ratelimit(&user_id)? {
        Some(r) => RateLimitResponse {
            messages_per_second: r.messages_per_second,
            burst_count: r.burst_count,
        },
        None => RateLimitResponse {
            messages_per_second: None,
            burst_count: None,
        },
    };
    json_ok(response)
}
/// POST /_synapse/admin/v1/users/{user_id}/override_ratelimit
///
/// Set the ratelimit override for a user; echoes the stored values back.
#[endpoint]
pub async fn set_user_ratelimit(
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<RateLimitReqBody>,
) -> JsonResult<RateLimitResponse> {
    let user_id = user_id.into_inner();
    let body = body.into_inner();
    match data::user::user_exists(&user_id)? {
        false => Err(MatrixError::not_found("User not found").into()),
        true => {
            data::user::set_ratelimit(&user_id, body.messages_per_second, body.burst_count)?;
            json_ok(RateLimitResponse {
                messages_per_second: body.messages_per_second,
                burst_count: body.burst_count,
            })
        }
    }
}
/// DELETE /_synapse/admin/v1/users/{user_id}/override_ratelimit
///
/// Delete the ratelimit override for a user.
#[endpoint]
pub async fn delete_user_ratelimit(user_id: PathParam<OwnedUserId>) -> EmptyResult {
    let user_id = user_id.into_inner();
    match data::user::user_exists(&user_id)? {
        false => Err(MatrixError::not_found("User not found").into()),
        true => {
            data::user::delete_ratelimit(&user_id)?;
            empty_ok()
        }
    }
}
// ============================================================================
// Router
// ============================================================================
/// Builds the Synapse-compatible admin user routes. Paths are relative to the
/// admin mount point (`/_synapse/admin/...`); handler docs describe the exact
/// method and path each route serves.
pub fn router() -> Router {
    Router::new()
        // Phase 1: MAS Critical
        // v2/users (list)
        .push(Router::with_path("v2/users").get(list_users_v2))
        // v3/users (list)
        .push(Router::with_path("v3/users").get(list_users_v3))
        // v2/users/{user_id}
        .push(
            Router::with_path("v2/users/{user_id}")
                .get(get_user_v2)
                .put(put_user_v2),
        )
        // v1/users/{user_id}/_allow_cross_signing_replacement_without_uia
        .push(
            Router::with_path("v1/users/{user_id}/_allow_cross_signing_replacement_without_uia")
                .post(allow_cross_signing_replacement),
        )
        // Phase 2: User Management
        // v1/deactivate/{user_id}
        .push(Router::with_path("v1/deactivate/{user_id}").post(deactivate_user))
        // v1/reset_password/{user_id}
        .push(Router::with_path("v1/reset_password/{user_id}").post(reset_password))
        // v1/users/{user_id}/admin
        .push(
            Router::with_path("v1/users/{user_id}/admin")
                .get(get_admin_status)
                .put(set_admin_status),
        )
        // v1/users/{user_id}/shadow_ban
        .push(
            Router::with_path("v1/users/{user_id}/shadow_ban")
                .post(shadow_ban_user)
                .delete(unshadow_ban_user),
        )
        // v1/suspend/{user_id}
        .push(Router::with_path("v1/suspend/{user_id}").put(suspend_user))
        // Phase 2: Extended User Management
        // v1/whois/{user_id}
        .push(Router::with_path("v1/whois/{user_id}").get(whois_user))
        // v1/users/{user_id}/joined_rooms
        .push(Router::with_path("v1/users/{user_id}/joined_rooms").get(user_joined_rooms))
        // v1/users/{user_id}/pushers
        .push(Router::with_path("v1/users/{user_id}/pushers").get(user_pushers))
        // v1/users/{user_id}/accountdata
        .push(Router::with_path("v1/users/{user_id}/accountdata").get(user_account_data))
        // v1/users/{user_id}/override_ratelimit
        .push(
            Router::with_path("v1/users/{user_id}/override_ratelimit")
                .get(get_user_ratelimit)
                .post(set_user_ratelimit)
                .delete(delete_user_ratelimit),
        )
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/media.rs | crates/server/src/routing/admin/media.rs | use salvo::prelude::*;
/// Admin media routes; no endpoints are implemented yet.
pub fn router() -> Router {
    Router::new()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/federation.rs | crates/server/src/routing/admin/federation.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
};
/// Admin federation routes; no endpoints are implemented yet.
pub fn router() -> Router {
    Router::new()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/scheduled_task.rs | crates/server/src/routing/admin/scheduled_task.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
};
/// Admin scheduled-task routes; no endpoints are implemented yet.
pub fn router() -> Router {
    Router::new()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/server_notice.rs | crates/server/src/routing/admin/server_notice.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
};
/// Admin server-notice routes; no endpoints are implemented yet.
pub fn router() -> Router {
    Router::new()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/statistic.rs | crates/server/src/routing/admin/statistic.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
};
/// Admin statistics routes; no endpoints are implemented yet.
pub fn router() -> Router {
    Router::new()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/admin/room.rs | crates/server/src/routing/admin/room.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde::Serialize;
use crate::admin;
use crate::core::client::space::{HierarchyReqArgs, HierarchyResBody};
use crate::{AuthArgs, DepotExt, JsonResult, json_ok};
/// Admin room routes:
/// - `GET v1/rooms` — paginated list of all rooms
/// - `GET v1/rooms/{room_id}/hierarchy` — space hierarchy for one room
pub fn router() -> Router {
    Router::new().push(Router::with_path("v1").push(
        Router::with_path("rooms").get(list_rooms).push(
            Router::with_path("{room_id}").push(Router::with_path("hierarchy").get(get_hierarchy)),
        ),
    ))
}
/// One room entry in the admin room list response.
#[derive(Debug, Serialize, ToSchema)]
struct RoomInfoResponse {
    // Fully-qualified room id, e.g. `!abc:example.org`.
    room_id: String,
    name: String,
    joined_members: u64,
}
/// Paginated room list response for `GET v1/rooms`.
#[derive(Debug, Serialize, ToSchema)]
struct RoomsResponse {
    // Offset this page started at (echo of the `from` query parameter).
    offset: i64,
    // Total number of rooms known to the server, regardless of paging.
    total_rooms: i64,
    // Offset token for the next page; omitted on the last page.
    #[serde(skip_serializing_if = "Option::is_none")]
    next_batch: Option<String>,
    rooms: Vec<RoomInfoResponse>,
}
/// `GET v1/rooms` — paginated listing of all rooms the server knows about.
/// `from` defaults to 0 (clamped non-negative); `limit` defaults to 100 and
/// is clamped to 1..=1000.
#[handler]
pub fn list_rooms(
    from: QueryParam<i64, false>,
    limit: QueryParam<i64, false>,
) -> JsonResult<RoomsResponse> {
    let offset = from.into_inner().unwrap_or(0).max(0);
    let limit = limit.into_inner().unwrap_or(100).clamp(1, 1000);
    let all_rooms = crate::room::all_room_ids()?;
    let total_rooms = all_rooms.len() as i64;
    // Page in memory over the full id list, resolving display info per room.
    let mut rooms = Vec::new();
    for room_id in all_rooms
        .into_iter()
        .skip(offset as usize)
        .take(limit as usize)
    {
        let info = admin::get_room_info(&room_id);
        rooms.push(RoomInfoResponse {
            room_id: info.id.to_string(),
            name: info.name,
            joined_members: info.joined_members,
        });
    }
    // Hand out the next offset only when more rooms remain after this page.
    let end = offset + rooms.len() as i64;
    let next_batch = (end < total_rooms).then(|| end.to_string());
    json_ok(RoomsResponse {
        offset,
        total_rooms,
        next_batch,
        rooms,
    })
}
/// `GET v1/rooms/{room_id}/hierarchy` — space hierarchy as visible to the
/// authenticated caller.
#[handler]
pub async fn get_hierarchy(
    _aa: AuthArgs,
    args: HierarchyReqArgs,
    depot: &mut Depot,
) -> JsonResult<HierarchyResBody> {
    let authed = depot.authed_info()?;
    json_ok(crate::room::space::get_room_hierarchy(authed.user_id(), &args).await?)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/user.rs | crates/server/src/routing/client/user.rs | mod account;
mod filter;
mod openid;
mod room;
use salvo::prelude::*;
use crate::hoops;
/// Routes under `/user`, all requiring authentication.
///
/// Rate-limited subtree: `mutual_rooms` and `openid/request_token`.
/// Unlimited subtree: filters and global/per-room account data.
pub fn authed_router() -> Router {
    Router::with_path("user")
        .push(
            Router::with_hoop(hoops::limit_rate).push(
                Router::with_path("{user_id}")
                    .push(Router::with_path("mutual_rooms").get(room::get_mutual_rooms))
                    .push(Router::with_path("openid/request_token").post(openid::request_token)),
            ),
        )
        .push(
            Router::with_path("{user_id}")
                .push(
                    Router::with_path("filter")
                        .post(filter::create_filter)
                        .push(Router::with_path("{filter_id}").get(filter::get_filter)),
                )
                .push(
                    Router::with_path("account_data/{event_type}")
                        .get(account::get_global_data)
                        .put(account::set_global_data),
                )
                .push(
                    Router::with_path("rooms/{room_id}/account_data/{event_type}")
                        .get(account::get_room_data)
                        .put(account::set_room_data),
                ),
        )
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/user_directory.rs | crates/server/src/routing/client/user_directory.rs | use diesel::prelude::*;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::user_directory::SearchedUser;
use crate::core::client::user_directory::{
SearchUsersReqArgs, SearchUsersReqBody, SearchUsersResBody,
};
use crate::core::events::StateEventType;
use crate::core::events::room::join_rule::RoomJoinRulesEventContent;
use crate::core::identifiers::*;
use crate::core::room::JoinRule;
use crate::data::connect;
use crate::data::schema::*;
use crate::{AuthArgs, DepotExt, JsonResult, data, hoops, json_ok, room};
/// `POST /user_directory/search`, authenticated and rate-limited.
pub fn authed_router() -> Router {
    Router::with_path("user_directory/search")
        .hoop(hoops::limit_rate)
        .post(search)
}
/// #POST /_matrix/client/r0/user_directory/search
/// Searches all known users for a match.
///
/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
///   and don't share a room with the sender
#[endpoint]
fn search(
    _aa: AuthArgs,
    _args: SearchUsersReqArgs,
    body: JsonBody<SearchUsersReqBody>,
    depot: &mut Depot,
) -> JsonResult<SearchUsersResBody> {
    let authed = depot.authed_info()?;
    let body = body.into_inner();
    // FIX: escape LIKE metacharacters so a search term containing `%`, `_`
    // or `\` matches literally instead of acting as a wildcard pattern.
    let escaped = body
        .search_term
        .replace('\\', "\\\\")
        .replace('%', "\\%")
        .replace('_', "\\_");
    let pattern = format!("%{escaped}%");
    // Case-insensitive substring match over user id and display name,
    // excluding the searching user themselves.
    let user_ids = user_profiles::table
        .filter(
            user_profiles::user_id
                .ilike(pattern.clone())
                .or(user_profiles::display_name.ilike(pattern)),
        )
        .filter(user_profiles::user_id.ne(authed.user_id()))
        .select(user_profiles::user_id)
        .load::<OwnedUserId>(&mut connect()?)?;
    let mut users = user_ids.into_iter().filter_map(|user_id| {
        let user = SearchedUser {
            user_id: user_id.clone(),
            display_name: data::user::display_name(&user_id).ok().flatten(),
            avatar_url: data::user::avatar_url(&user_id).ok().flatten(),
        };
        // Visible if the candidate is in any publicly-joinable room...
        let user_is_in_public_rooms =
            data::user::joined_rooms(&user_id)
                .ok()?
                .into_iter()
                .any(|room_id| {
                    room::get_state_content::<RoomJoinRulesEventContent>(
                        &room_id,
                        &StateEventType::RoomJoinRules,
                        "",
                        None,
                    )
                    .map(|r| r.join_rule == JoinRule::Public)
                    .unwrap_or(false)
                });
        if user_is_in_public_rooms {
            return Some(user);
        }
        // ...or if they share at least one room with the searcher.
        let user_is_in_shared_rooms =
            !room::user::shared_rooms(vec![authed.user_id().to_owned(), user_id])
                .ok()?
                .is_empty();
        if user_is_in_shared_rooms {
            return Some(user);
        }
        None
    });
    // `limited` signals that more matches existed beyond the requested limit.
    let results = users.by_ref().take(body.limit).collect();
    let limited = users.next().is_some();
    json_ok(SearchUsersResBody { results, limited })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/sync_v3.rs | crates/server/src/routing/client/sync_v3.rs | use std::time::Duration;
use salvo::prelude::*;
use crate::core::client::sync_events;
use crate::{AuthArgs, DepotExt, JsonResult, json_ok};
/// #GET /_matrix/client/r0/sync
/// Synchronize the client's state with the latest state on the server.
///
/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a
/// previous request for incremental syncs.
///
/// Calling this endpoint without a `since` parameter returns:
/// - Some of the most recent events of each timeline
/// - Notification counts for each room
/// - Joined and invited member counts, heroes
/// - All state events
///
/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
/// For joined rooms:
/// - Some of the most recent events of each timeline that happened after since
/// - If user joined the room after since: All state events (unless lazy loading is activated) and
/// all device list updates in that room
/// - If the user was already in the room: A list of all events that are in the state now, but were
/// not in the state at `since`
/// - If the state we send contains a member event: Joined and invited member counts, heroes
/// - Device list updates that happened after `since`
/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts
/// - EDUs that are active now (read receipts, typing updates, presence)
/// - TODO: Allow multiple sync streams to support Pantalaimon
///
/// For invited rooms:
/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
///
/// For left rooms:
/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
///
/// - Sync is handled in an async task, multiple requests from the same device with the same
/// `since` will be cached
#[endpoint]
pub(super) async fn sync_events_v3(
    _aa: AuthArgs,
    args: sync_events::v3::SyncEventsReqArgs,
    depot: &mut Depot,
) -> JsonResult<sync_events::v3::SyncEventsResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let device_id = authed.device_id();
    // Record activity / requested presence state before computing the sync.
    crate::user::ping_presence(sender_id, &args.set_presence)?;
    let mut body = crate::sync_v3::sync_events(sender_id, device_id, &args).await?;
    // Long-poll only when the first pass produced nothing to report and the
    // client did not ask for a full state resync.
    if !args.full_state
        && body.rooms.is_empty()
        && body.presence.is_empty()
        && body.account_data.is_empty()
        && body.device_lists.is_empty()
        && body.to_device.is_empty()
    {
        // Hang a few seconds so requests are not spammed
        // Stop hanging if new info arrives
        // The client-requested timeout is capped at 30s server-side.
        let default = Duration::from_secs(30);
        let duration = std::cmp::min(args.timeout.unwrap_or(default), default);
        // Setup watchers, so if there's no response, we can wait for them
        let watcher = crate::watcher::watch(sender_id, device_id);
        _ = tokio::time::timeout(duration, watcher).await;
        // Retry returning data
        body = crate::sync_v3::sync_events(sender_id, device_id, &args).await?;
    }
    json_ok(body)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/key.rs | crates/server/src/routing/client/key.rs | mod device_signing;
mod signature;
use std::collections::HashSet;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::key::{
ClaimKeysReqBody, ClaimKeysResBody, KeyChangesReqArgs, KeyChangesResBody, KeysReqBody,
KeysResBody, UploadKeysReqBody, UploadKeysResBody,
};
use crate::event::BatchToken;
use crate::user::key;
use crate::{AuthArgs, CjsonResult, DepotExt, JsonResult, cjson_ok, data, json_ok, room};
/// E2EE key routes under `/keys`, all requiring authentication:
/// claim/query/upload one-time & device keys, key-change polling, and
/// signature / device-signing uploads.
pub fn authed_router() -> Router {
    Router::with_path("keys")
        .push(Router::with_path("claim").post(claim_keys))
        .push(Router::with_path("query").post(query_keys))
        .push(Router::with_path("upload").post(upload_keys))
        .push(Router::with_path("changes").get(get_key_changes))
        .push(Router::with_path("signatures/upload").post(signature::upload))
        .push(Router::with_path("device_signing/upload").post(device_signing::upload))
}
/// #POST /_matrix/client/r0/keys/claim
/// Claims one-time keys
#[endpoint]
async fn claim_keys(
    _aa: AuthArgs,
    body: JsonBody<ClaimKeysReqBody>,
) -> CjsonResult<ClaimKeysResBody> {
    let claimed = key::claim_one_time_keys(&body.one_time_keys).await?;
    cjson_ok(claimed)
}
/// #POST /_matrix/client/r0/keys/query
/// Get end-to-end encryption keys for the given users.
///
/// - Always fetches users from other servers over federation
/// - Gets master keys, self-signing keys, user signing keys and device keys.
/// - The master and self-signing keys contain signatures that the user is allowed to see
#[endpoint]
async fn query_keys(
    _aa: AuthArgs,
    body: JsonBody<KeysReqBody>,
    depot: &mut Depot,
) -> CjsonResult<KeysResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    // Only the requesting user may see their own user-signing key signatures.
    let res_body =
        key::query_keys(Some(sender_id), &body.device_keys, |u| u == sender_id, false).await?;
    cjson_ok(res_body)
}
/// #POST /_matrix/client/r0/keys/upload
/// Publish end-to-end encryption keys for the sender device.
///
/// - Adds one time keys
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
#[endpoint]
async fn upload_keys(
    _aa: AuthArgs,
    body: JsonBody<UploadKeysReqBody>,
    depot: &mut Depot,
) -> JsonResult<UploadKeysResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let device_id = authed.device_id();
    for (key_id, one_time_key) in &body.one_time_keys {
        crate::user::add_one_time_key(sender_id, device_id, key_id, one_time_key)?;
    }
    if let Some(device_keys) = body.device_keys.as_ref() {
        crate::user::add_device_keys(sender_id, device_id, device_keys)?;
    }
    //TODO: fallback keys. e2e_keys.py 848
    let one_time_key_counts = data::user::count_one_time_keys(sender_id, device_id)?;
    json_ok(UploadKeysResBody {
        one_time_key_counts,
    })
}
/// #POST /_matrix/client/r0/keys/changes
/// Gets a list of users who have updated their device identity keys since the previous sync token.
///
/// - TODO: left users
#[endpoint]
async fn get_key_changes(
    _aa: AuthArgs,
    args: KeyChangesReqArgs,
    depot: &mut Depot,
) -> JsonResult<KeyChangesResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();

    // Both window boundaries arrive as sync batch tokens.
    let from_tk: BatchToken = args.from.parse()?;
    let to_tk: BatchToken = args.to.parse()?;

    let mut changed = HashSet::new();
    // Changes tracked directly for the sender ...
    changed.extend(data::user::keys_changed_users(
        sender_id,
        from_tk.event_sn(),
        Some(to_tk.event_sn()),
    )?);
    // ... plus changes visible through every room the sender is joined to.
    for room_id in data::user::joined_rooms(sender_id)? {
        changed.extend(room::keys_changed_users(
            &room_id,
            from_tk.event_sn(),
            Some(to_tk.event_sn()),
        )?);
    }

    json_ok(KeyChangesResBody {
        changed: changed.into_iter().collect(),
        left: Vec::new(), // TODO
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/register.rs | crates/server/src/routing/client/register.rs | use diesel::prelude::*;
use palpo_core::presence::PresenceState;
use salvo::oapi::extract::{JsonBody, QueryParam};
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::client::account::{LoginType, RegistrationKind};
use crate::core::client::register::*;
use crate::core::client::uiaa::{AuthFlow, AuthType, UiaaInfo};
use crate::core::events::GlobalAccountDataEventType;
use crate::core::events::push_rules::PushRulesEventContent;
use crate::core::identifiers::*;
use crate::core::push::Ruleset;
use crate::core::serde::JsonValue;
use crate::data::schema::*;
use crate::data::user::{NewDbPresence, NewDbProfile};
use crate::data::{connect, diesel_exists};
use crate::{
AppError, AuthArgs, DEVICE_ID_LENGTH, EmptyResult, JsonResult, MatrixError,
RANDOM_USER_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, config, data, empty_ok, exts::*, hoops,
membership, room, utils,
};
/// Builds the unauthenticated `/register` sub-router.
///
/// Registration itself, the username availability check and registration-token
/// validation are reachable without an access token; all of them are rate limited.
pub fn public_router() -> Router {
    Router::with_path("register").push(
        Router::with_hoop(hoops::limit_rate)
            .push(
                Router::new()
                    .post(register)
                    .push(Router::with_path("available").get(available)),
            )
            .push(Router::with_path("m.login.registration_token/validity").get(validate_token)),
    )
}
/// Builds the authenticated `/register` sub-router (third-party identifier
/// token requests via email / msisdn).
pub fn authed_router() -> Router {
    Router::with_path("register")
        .push(Router::with_path("email/requestToken").post(token_via_email))
        .push(Router::with_path("msisdn/requestToken").post(token_via_msisdn))
}
/// `POST /_matrix/client/*/register`
///
/// Register an account on this homeserver.
/// `/v3/` ([spec])
///
/// Flow: validate payload → check registration policy → pick/validate user id →
/// appservice namespace checks → UIAA → create user, profile, presence and
/// default push rules → create device/token (unless `inhibit_login`) →
/// auto-join configured rooms.
///
/// [spec]: https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3register
#[endpoint]
async fn register(
    aa: AuthArgs,
    body: JsonBody<RegisterReqBody>,
    req: &mut Request,
    depot: &mut Depot,
    _res: &mut Response,
) -> JsonResult<RegisterResBody> {
    let body = body.into_inner();
    // For complement test `TestRequestEncodingFails`.
    // An all-default body may mean deserialization fell back to defaults; re-parse
    // the raw payload so malformed JSON is reported as M_NOT_JSON.
    if body.is_default() {
        let payload = req.payload().await?;
        if let Err(e) = serde_json::from_slice::<JsonValue>(payload) {
            return Err(MatrixError::not_json(format!("invalid json data: {e}")).into());
        }
    }
    let conf = crate::config::get();
    // Registration stays possible for appservices, and when a registration token
    // is configured, even while open registration is disabled.
    if !conf.allow_registration && !aa.from_appservice && conf.registration_token.is_none() {
        return Err(MatrixError::forbidden("registration has been disabled", None).into());
    }
    let is_guest = body.kind == RegistrationKind::Guest;
    let user_id = match (&body.username, is_guest) {
        // Non-guest with an explicit username: normalize, validate, reject collisions.
        (Some(username), false) => {
            let proposed_user_id =
                UserId::parse_with_server_name(username.to_lowercase(), &conf.server_name)
                    .ok()
                    .filter(|user_id| {
                        !user_id.is_historical() && user_id.server_name() == conf.server_name
                    })
                    .ok_or(MatrixError::invalid_username("username is invalid"))?;
            proposed_user_id.validate_strict()?;
            if data::user::user_exists(&proposed_user_id)? {
                return Err(MatrixError::user_in_use("desired user id is already taken").into());
            }
            proposed_user_id
        }
        // Guests (and requests without a username) get a random, unclaimed localpart.
        _ => loop {
            let proposed_user_id = UserId::parse_with_server_name(
                utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
                &conf.server_name,
            )?;
            proposed_user_id.validate_strict()?;
            if !data::user::user_exists(&proposed_user_id)? {
                break proposed_user_id;
            }
        },
    };
    // Appservice registrations must stay inside their namespace; everyone else
    // must stay out of exclusive appservice namespaces.
    if body.login_type == Some(LoginType::Appservice) {
        let authed = depot.authed_info()?;
        if let Some(appservice) = &authed.appservice {
            if !appservice.is_user_match(&user_id) {
                return Err(MatrixError::exclusive("User is not in namespace.").into());
            }
        } else {
            return Err(MatrixError::missing_token("Missing appservice token.").into());
        }
    } else if crate::appservice::is_exclusive_user_id(&user_id)? {
        return Err(MatrixError::exclusive("User id reserved by appservice.").into());
    }
    // UIAA: require the registration token when one is configured, otherwise a
    // dummy stage.
    let mut uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: if conf.registration_token.is_some() {
                vec![AuthType::RegistrationToken]
            } else {
                vec![AuthType::Dummy]
            },
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };
    if body.login_type != Some(LoginType::Appservice) && !is_guest {
        if let Some(auth) = &body.auth {
            // The UIAA session is keyed on an empty user id here because no
            // account exists yet at this point.
            let (authed, uiaa) = crate::uiaa::try_auth(
                &UserId::parse_with_server_name("", &conf.server_name)
                    .expect("we know this is valid"),
                &body.device_id.clone().unwrap_or_else(|| "".into()),
                auth,
                &uiaa_info,
            )?;
            if !authed {
                return Err(AppError::Uiaa(uiaa));
            }
        } else {
            // First round trip: hand the client a fresh UIAA session to complete.
            uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
            crate::uiaa::update_session(
                &UserId::parse_with_server_name("", &config::get().server_name)
                    .expect("we know this is valid"),
                &body.device_id.clone().unwrap_or_else(|| "".into()),
                uiaa_info.session.as_ref().expect("session is always set"),
                Some(&uiaa_info),
            )?;
            return Err(uiaa_info.into());
        }
    }
    // Guests never get a password.
    let password = if is_guest {
        None
    } else {
        body.password.as_deref()
    };
    // Create user
    let db_user = crate::user::create_user(user_id.clone(), password)?;
    // Default to pretty display_name
    let display_name = user_id.localpart().to_owned();
    // // If enabled append lightning bolt to display name (default true)
    // if config::enable_lightning_bolt() {
    //     display_name.push_str(" ⚡️");
    // }
    diesel::insert_into(user_profiles::table)
        .values(NewDbProfile {
            user_id: user_id.clone(),
            room_id: None,
            display_name: Some(display_name.clone()),
            avatar_url: None,
            blurhash: None,
        })
        .execute(&mut connect()?)?;
    // Presence update
    crate::data::user::set_presence(
        NewDbPresence {
            user_id: user_id.clone(),
            stream_id: None,
            state: Some(PresenceState::Online.to_string()),
            status_msg: None,
            last_active_at: Some(UnixMillis::now()),
            last_federation_update_at: None,
            last_user_sync_at: None,
            currently_active: None,
            occur_sn: None,
        },
        true,
    )?;
    // Initial account data: server-default push rules.
    crate::data::user::set_data(
        &user_id,
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
        serde_json::to_value(PushRulesEventContent {
            global: Ruleset::server_default(&user_id),
        })
        .expect("to json always works"),
    )?;
    // Inhibit login does not work for guests
    if !is_guest && body.inhibit_login {
        return Ok(Json(RegisterResBody {
            access_token: None,
            user_id,
            device_id: None,
            refresh_token: None,
            expires_in: None,
        }));
    }
    // Generate new device id if the user didn't specify one
    let device_id = if is_guest {
        None
    } else {
        body.device_id.clone()
    }
    .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
    // Generate new token for the device
    let token = utils::random_string(TOKEN_LENGTH);
    //Create device for this account
    data::user::device::create_device(
        &user_id,
        &device_id,
        &token,
        body.initial_device_display_name.clone(),
        Some(req.remote_addr().to_string()),
    )?;
    // If this is the first real user, grant them admin privileges
    // Note: the server user, @palpo:servername, is generated first
    if !is_guest {
        // TODO: admin
        // if let Ok(admin_room) = crate::room::get_admin_room() {
        //     if crate::room::user::join_count(&admin_room)? == 1 {
        //         crate::admin::make_admin(&user_id).await?;
        //         warn!("Granting {} admin privileges as the first user", user_id);
        //     } else if body.login_type != Some(LoginType::Appservice) {
        //         info!("New user {} registered on this server.", user_id);
        //         let _ = crate::admin::send_message(RoomMessageEventContent::notice_plain(format!(
        //             "New user {user_id} registered on this server."
        //         )));
        //     }
        // }
    }
    let from_appservice = if let Ok(authed) = depot.authed_info() {
        authed.appservice.is_some()
    } else {
        false
    };
    // Auto-join configured rooms. Skipped for appservice users, and for guests
    // unless explicitly allowed; individual join failures never fail registration.
    if !from_appservice
        && !conf.auto_join_rooms.is_empty()
        && (conf.allow_guests_auto_join_rooms || !is_guest)
    {
        for room in &conf.auto_join_rooms {
            let Ok(room_id) = room::alias::resolve(room).await else {
                error!(
                    "failed to resolve room alias to room ID when attempting to auto join \
                {room}, skipping"
                );
                continue;
            };
            if !room::is_server_joined(&conf.server_name, &room_id)? {
                warn!("skipping room {room} to automatically join as we have never joined before.");
                continue;
            }
            if let Ok(room_server_name) = room.server_name() {
                match membership::join_room(
                    &db_user,
                    Some(&device_id),
                    &room_id,
                    Some("automatically joining this room upon registration".to_owned()),
                    &[conf.server_name.clone(), room_server_name.to_owned()],
                    None,
                    None,
                    Default::default(),
                )
                .await
                {
                    Err(e) => {
                        // don't return this error so we don't fail registrations
                        error!("Failed to automatically join room {room} for user {user_id}: {e}");
                    }
                    _ => {
                        info!("automatically joined room {room} for user {user_id}");
                    }
                }
            }
        }
    }
    Ok(Json(RegisterResBody {
        access_token: Some(token),
        user_id,
        device_id: Some(device_id),
        refresh_token: None,
        expires_in: None,
    }))
}
/// #GET /_matrix/client/r0/register/available
/// Checks if a username is valid and available on this server.
///
/// Conditions for returning true:
/// - The user id is not historical
/// - The server name of the user id matches this server
/// - No user or appservice on this server already claimed this username
///
/// Note: This will not reserve the username, so the username might become invalid when trying to register
#[endpoint]
async fn available(username: QueryParam<String, true>) -> JsonResult<AvailableResBody> {
    let localpart = username.into_inner().to_lowercase();
    let server_name = &config::get().server_name;

    // Reject anything that does not parse into a valid, non-historical local user id.
    let user_id = match UserId::parse_with_server_name(localpart, server_name) {
        Ok(id) if !id.is_historical() && id.server_name() == server_name => id,
        _ => return Err(MatrixError::invalid_username("Username is invalid.").into()),
    };

    // Taken usernames get a dedicated error code.
    let query = users::table.find(&user_id);
    if diesel_exists!(query, &mut connect()?)? {
        return Err(MatrixError::user_in_use("Desired user ID is already taken.").into());
    }
    // TODO add check for appservice namespaces
    // No check tripped, so the username is currently available.
    Ok(Json(AvailableResBody::new(true)))
}
/// `GET /_matrix/client/*/register/m.login.registration_token/validity`
///
/// Checks to see if the given registration token is valid.
/// `/v1/` ([spec])
///
/// [spec]: https://spec.matrix.org/latest/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity
// const METADATA: Metadata = metadata! {
//     method: GET,
//     rate_limited: true,
//     authentication: None,
//     history: {
//         unstable => "/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity",
//         1.2 => "/_matrix/client/v1/register/m.login.registration_token/validity",
//     }
// };
#[endpoint]
async fn validate_token(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // Stub: only enforces authentication for now.
    depot.authed_info()?;
    empty_ok()
}
/// `POST /_matrix/client/*/register/email/requestToken`
/// Request a registration token with a 3rd party email.
///
/// `/v3/` ([spec])
///
/// [spec]: https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3registeremailrequesttoken
#[endpoint]
async fn token_via_email(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // Stub: only enforces authentication for now.
    depot.authed_info()?;
    empty_ok()
}
/// `POST /_matrix/client/*/register/msisdn/requestToken`
/// Request a registration token with a phone number.
///
/// `/v3/` ([spec])
///
/// [spec]: https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3registermsisdnrequesttoken
// const METADATA: Metadata = metadata! {
//     method: POST,
//     rate_limited: false,
//     authentication: None,
//     history: {
//         1.0 => "/_matrix/client/r0/register/msisdn/requestToken",
//         1.1 => "/_matrix/client/v3/register/msisdn/requestToken",
//     }
// };
#[endpoint]
async fn token_via_msisdn(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // Stub: only enforces authentication for now.
    depot.authed_info()?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/device.rs | crates/server/src/routing/client/device.rs | use diesel::prelude::*;
use palpo_core::UnixMillis;
use salvo::oapi::extract::{JsonBody, PathParam};
use salvo::prelude::*;
use crate::core::OwnedDeviceId;
use crate::core::client::device::{
DeleteDeviceReqBody, DeleteDevicesReqBody, DeviceResBody, DevicesResBody, UpdatedDeviceReqBody,
};
use crate::core::client::uiaa::AuthError;
use crate::core::client::uiaa::{AuthFlow, AuthType, UiaaInfo};
use crate::core::error::ErrorKind;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::DbUserDevice;
use crate::{
AppError, AuthArgs, DEVICE_ID_LENGTH, DepotExt, EmptyResult, JsonResult, MatrixError,
SESSION_ID_LENGTH, data, empty_ok, json_ok, utils,
};
/// Builds the authenticated `/devices` sub-router.
///
/// NOTE(review): `delete_devices` is mounted under `/devices/delete_devices`,
/// but the spec places `POST /delete_devices` at the client API root — verify
/// how the parent router mounts this.
pub fn authed_router() -> Router {
    Router::with_path("devices")
        .get(list_devices)
        // .push(
        //     Router::with_hoop(hoops::limit_rate)
        //         .push(Router::new().post(register).push(Router::with_path("available").get(available)))
        //         .push(Router::with_path("m.login.registration_token/validity").get(validate_token)),
        // )
        .push(Router::with_path("delete_devices").post(delete_devices))
        .push(
            Router::with_path("{device_id}")
                .get(get_device)
                .delete(delete_device)
                .put(update_device),
        )
}
/// #GET /_matrix/client/r0/devices/{device_id}
/// Get metadata on a single device of the sender user.
#[endpoint]
async fn get_device(
    _aa: AuthArgs,
    device_id: PathParam<OwnedDeviceId>,
    depot: &mut Depot,
) -> JsonResult<DeviceResBody> {
    let authed = depot.authed_info()?;
    // The lookup is scoped to the sender, so other users' devices are invisible here.
    match data::user::device::get_device(authed.user_id(), &device_id) {
        Ok(device) => json_ok(DeviceResBody(device.into_matrix_device())),
        Err(_) => Err(MatrixError::not_found("Device is not found.").into()),
    }
}
/// #GET /_matrix/client/r0/devices
/// Get metadata on all devices of the sender user.
#[endpoint]
async fn list_devices(_aa: AuthArgs, depot: &mut Depot) -> JsonResult<DevicesResBody> {
    let authed = depot.authed_info()?;
    // Fetch every device row belonging to the sender and convert to the wire format.
    let rows = user_devices::table
        .filter(user_devices::user_id.eq(authed.user_id()))
        .load::<DbUserDevice>(&mut connect()?)?;
    let devices = rows.into_iter().map(|d| d.into_matrix_device()).collect();
    json_ok(DevicesResBody { devices })
}
/// #PUT /_matrix/client/r0/devices/{device_id}
/// Updates the metadata on a given device of the sender user.
///
/// If the device does not exist and the caller is an appservice whose
/// registration enables device management (MSC4190), a new device is created
/// instead of returning 404.
#[endpoint]
fn update_device(
    _aa: AuthArgs,
    device_id: PathParam<OwnedDeviceId>,
    body: JsonBody<UpdatedDeviceReqBody>,
    req: &mut Request,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let device_id = device_id.into_inner();
    // Fix: scope the lookup to the authenticated user. Without the user_id
    // filter the query could match (and then update, and emit a key-update
    // for) another user's device row that shares the same device id.
    let device = user_devices::table
        .filter(user_devices::user_id.eq(authed.user_id()))
        .filter(user_devices::device_id.eq(&device_id))
        .first::<DbUserDevice>(&mut connect()?)
        .optional()?;
    if let Some(device) = device {
        diesel::update(&device)
            .set((
                user_devices::display_name.eq(&body.display_name),
                user_devices::last_seen_ip.eq(&req.remote_addr().to_string()),
                user_devices::last_seen_at.eq(UnixMillis::now()),
            ))
            .execute(&mut connect()?)?;
        // Other devices must learn about the metadata change.
        crate::user::key::send_device_key_update(&device.user_id, &device_id)?;
    } else {
        // MSC4190: appservices with device management may create devices on PUT.
        let Some(appservice) = authed.appservice() else {
            return Err(MatrixError::not_found("Device is not found.").into());
        };
        if !appservice.registration.device_management {
            return Err(MatrixError::not_found("Device is not found.").into());
        }
        debug!(
            "Creating new device for {} from appservice {} as MSC4190 is enabled and device ID does not exist",
            authed.user_id(),
            appservice.registration.id
        );
        let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH));
        let device = data::user::device::create_device(
            authed.user_id(),
            &device_id,
            &appservice.registration.as_token,
            None,
            Some(req.remote_addr().to_string()),
        )?;
        crate::user::key::send_device_key_update(&device.user_id, &device_id)?;
    }
    empty_ok()
}
/// #DELETE /_matrix/client/r0/devices/{deviceId}
/// Deletes the given device.
///
/// - Requires UIAA to verify user password
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[endpoint]
async fn delete_device(
    _aa: AuthArgs,
    device_id: PathParam<OwnedDeviceId>,
    body: JsonBody<Option<DeleteDeviceReqBody>>,
    depot: &mut Depot,
    res: &mut Response,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let auth = body.into_inner().and_then(|body| body.auth);
    let device_id = device_id.into_inner();
    // UIAA: device deletion always requires password re-authentication.
    let mut uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };
    // No auth supplied yet: open a fresh UIAA session and ask the client to retry.
    let Some(auth) = auth else {
        uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
        uiaa_info.auth_error = Some(AuthError::new(
            ErrorKind::Unauthorized,
            "Missing authentication data",
        ));
        return Err(uiaa_info.into());
    };
    if let Err(e) = crate::uiaa::try_auth(authed.user_id(), authed.device_id(), &auth, &uiaa_info) {
        // A Forbidden Matrix error is passed through as-is; any other failure is
        // reported as an invalid UIAA attempt with a new session.
        if let AppError::Matrix(e) = e
            && let ErrorKind::Forbidden { .. } = e.kind
        {
            return Err(e.into());
        }
        uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
        uiaa_info.auth_error = Some(AuthError::new(
            ErrorKind::forbidden(),
            "Invalid authentication data",
        ));
        res.status_code(StatusCode::UNAUTHORIZED); // TestDeviceManagement asks http code 401
        return Err(uiaa_info.into());
    }
    // Auth passed: perform the full per-device cleanup.
    data::user::device::remove_device(authed.user_id(), &device_id)?;
    empty_ok()
}
/// #POST /_matrix/client/r0/delete_devices
/// Deletes the given list of devices.
///
/// - Requires UIAA to verify user password
///
/// For each device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[endpoint]
async fn delete_devices(
    _aa: AuthArgs,
    body: JsonBody<DeleteDevicesReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let DeleteDevicesReqBody { devices, auth } = body.into_inner();
    // UIAA: bulk device deletion always requires a password check.
    let uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };
    let Some(auth) = auth else {
        return Err(uiaa_info.into());
    };
    crate::uiaa::try_auth(authed.user_id(), authed.device_id(), &auth, &uiaa_info)?;
    // Fix: scope the deletion by the caller's *user id*. The previous code
    // compared the user_id column against `authed.device_id()`, which never
    // matches and silently deleted nothing.
    diesel::delete(
        user_devices::table
            .filter(user_devices::user_id.eq(authed.user_id()))
            .filter(user_devices::device_id.eq_any(&devices)),
    )
    .execute(&mut connect()?)?;
    // NOTE(review): unlike `delete_device`, this bypasses
    // `data::user::device::remove_device`, so per-device cleanup (tokens,
    // to-device events, device list update notifications) may be skipped — verify.
    empty_ok()
}
/// Fetches the user's dehydrated device (MSC3814). Not yet implemented.
#[endpoint]
pub(super) async fn dehydrated(_aa: AuthArgs) -> EmptyResult {
    //TODO: Later
    empty_ok()
}
/// Deletes the sender's dehydrated devices.
#[endpoint]
pub(super) async fn delete_dehydrated(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    let authed = depot.authed_info()?;
    data::user::delete_dehydrated_devices(authed.user_id())?;
    empty_ok()
}
/// Creates or replaces the user's dehydrated device (MSC3814). Not yet implemented.
#[endpoint]
pub(super) async fn upsert_dehydrated(_aa: AuthArgs) -> EmptyResult {
    //TODO: Later
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/unstable.rs | crates/server/src/routing/client/unstable.rs | use salvo::prelude::*;
use crate::hoops;
/// Builds the `/unstable` sub-router for not-yet-stabilized MSC endpoints.
///
/// Every route here is rate limited and requires an access token.
pub(super) fn router() -> Router {
    Router::with_path("unstable")
        .hoop(hoops::limit_rate)
        .hoop(hoops::auth_by_access_token)
        .push(
            // MSC3391: deleting account data.
            Router::with_path("org.matrix.msc3391/user/{user_id}/account_data/{account_type}")
                .delete(super::account::delete_account_data_msc3391),
        )
        .push(
            // Simplified sliding sync (MSC3575 / MSC4186).
            Router::with_path("org.matrix.simplified_msc3575/sync")
                .post(super::sync_msc4186::sync_events_v5),
        )
        .push(
            // MSC3266: room summary by id or alias.
            Router::with_path("im.nheko.summary/rooms/{room_id_or_alias}/summary")
                .get(super::room::summary::get_summary_msc_3266),
        )
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room_key.rs | crates/server/src/routing/client/room_key.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::backup::*;
use crate::core::serde::RawJson;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::key_backup::{self, DbRoomKey, DbRoomKeysVersion};
use crate::{AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, empty_ok, hoops, json_ok};
/// Builds the authenticated `/room_keys` sub-router (server-side key backups).
/// All routes are rate limited.
pub fn authed_router() -> Router {
    Router::with_path("room_keys")
        .hoop(hoops::limit_rate)
        .push(
            Router::with_path("keys")
                .get(get_keys)
                .put(add_keys)
                .delete(delete_keys)
                .push(
                    Router::with_path("{room_id}")
                        .get(get_keys_for_room)
                        .put(add_keys_for_room)
                        .delete(delete_room_keys)
                        .push(
                            Router::with_path("{session_id}")
                                .get(get_session_keys)
                                .put(add_keys_for_session)
                                .delete(delete_session_keys),
                        ),
                ),
        )
        .push(
            Router::with_path("version")
                .get(latest_version)
                .post(create_version)
                .push(
                    Router::with_path("{version}")
                        .get(get_version)
                        // Fix: the spec (and `update_version`'s own docs) define
                        // this endpoint as PUT, not POST.
                        .put(update_version)
                        .delete(delete_version),
                ),
        )
}
/// #GET /_matrix/client/r0/room_keys/keys
/// Retrieves all keys from the backup.
#[endpoint]
async fn get_keys(
    _aa: AuthArgs,
    version: QueryParam<i64, true>,
    depot: &mut Depot,
) -> JsonResult<KeysResBody> {
    let authed = depot.authed_info()?;
    let version = version.into_inner();
    // Load every backed-up key for this user/version, keyed by room id.
    let rooms = e2e_room_keys::table
        .filter(e2e_room_keys::version.eq(version))
        .filter(e2e_room_keys::user_id.eq(authed.user_id()))
        .load::<DbRoomKey>(&mut connect()?)?
        .into_iter()
        .map(|row| {
            let backup = RawJson::<RoomKeyBackup>::from_value(&row.session_data).unwrap();
            (row.room_id, backup)
        })
        .collect();
    json_ok(KeysResBody { rooms })
}
/// #GET /_matrix/client/r0/room_keys/keys/{room_id}
/// Retrieves all keys from the backup for a given room.
#[endpoint]
fn get_keys_for_room(
    _aa: AuthArgs,
    args: KeysForRoomReqArgs,
    depot: &mut Depot,
) -> JsonResult<KeysForRoomResBody> {
    let authed = depot.authed_info()?;
    let room_key = key_backup::get_room_key(authed.user_id(), &args.room_id, args.version)?.ok_or(
        MatrixError::not_found("Backup key not found for this user's room."),
    )?;
    // Response format is a one-entry map from room id to its backed-up sessions.
    let backup = RawJson::<RoomKeyBackup>::from_value(&room_key.session_data).unwrap();
    let mut rooms = BTreeMap::new();
    rooms.insert(room_key.room_id, backup);
    json_ok(KeysForRoomResBody::new(rooms))
}
/// #GET /_matrix/client/r0/room_keys/keys/{room_id}/{session_id}
/// Retrieves a key from the backup.
#[endpoint]
async fn get_session_keys(
    _aa: AuthArgs,
    args: KeysForSessionReqArgs,
    depot: &mut Depot,
) -> JsonResult<KeyBackupData> {
    let authed = depot.authed_info()?;
    // Exactly one row can match: (user, version, room, session) is the full key.
    let row = e2e_room_keys::table
        .filter(e2e_room_keys::user_id.eq(authed.user_id()))
        .filter(e2e_room_keys::version.eq(args.version))
        .filter(e2e_room_keys::room_id.eq(&args.room_id))
        .filter(e2e_room_keys::session_id.eq(&args.session_id))
        .first::<DbRoomKey>(&mut connect()?)
        .optional()?;
    match row {
        Some(row) => json_ok(row.into()),
        None => Err(MatrixError::not_found("Backup key not found for this user's session.").into()),
    }
}
/// #PUT /_matrix/client/r0/room_keys/keys
/// Add the received backup keys to the database.
///
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
#[endpoint]
fn add_keys(
    _aa: AuthArgs,
    version: QueryParam<i64, true>,
    body: JsonBody<AddKeysReqBody>,
    depot: &mut Depot,
) -> JsonResult<ModifyKeysResBody> {
    let authed = depot.authed_info()?;
    let version = version.into_inner();
    // Writes are only allowed against the latest backup version.
    let keys_version = key_backup::get_latest_room_keys_version(authed.user_id())?
        .ok_or(MatrixError::not_found("Key backup does not exist."))?;
    if version != keys_version.version {
        return Err(MatrixError::invalid_param(
            "You may only manipulate the most recently created version of the backup.",
        )
        .into());
    }
    // Persist every session key, grouped per room in the request body.
    for (room_id, room) in &body.rooms {
        for (session_id, key_data) in &room.sessions {
            key_backup::add_key(authed.user_id(), version, room_id, session_id, key_data)?
        }
    }
    // Report the new key count and etag so clients can detect concurrent changes.
    json_ok(ModifyKeysResBody {
        count: (key_backup::count_keys(authed.user_id(), version)? as u32).into(),
        etag: key_backup::get_etag(authed.user_id(), version)?,
    })
}
/// #PUT /_matrix/client/r0/room_keys/keys/{room_id}
/// Add the received backup keys to the database.
///
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
#[endpoint]
fn add_keys_for_room(
    _aa: AuthArgs,
    args: KeysForRoomReqArgs,
    body: JsonBody<AddKeysForRoomReqBody>,
    depot: &mut Depot,
) -> JsonResult<ModifyKeysResBody> {
    let authed = depot.authed_info()?;
    // Writes are only allowed against the latest backup version.
    let keys_version = key_backup::get_latest_room_keys_version(authed.user_id())?
        .ok_or(MatrixError::not_found("Key backup does not exist."))?;
    if args.version != keys_version.version {
        return Err(MatrixError::invalid_param(
            "You may only manipulate the most recently created version of the backup.",
        )
        .into());
    }
    // Persist every uploaded session key for the addressed room.
    for (session_id, key_data) in &body.sessions {
        key_backup::add_key(
            authed.user_id(),
            args.version,
            &args.room_id,
            session_id,
            key_data,
        )?
    }
    json_ok(ModifyKeysResBody {
        count: (key_backup::count_keys(authed.user_id(), args.version)? as u32).into(),
        etag: key_backup::get_etag(authed.user_id(), args.version)?,
    })
}
/// #PUT /_matrix/client/r0/room_keys/keys/{room_id}/{session_id}
/// Add the received backup key to the database.
///
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the key to the backup
/// - Echoes the stored key data back to the client
#[endpoint]
fn add_keys_for_session(
    _aa: AuthArgs,
    args: KeysForSessionReqArgs,
    body: JsonBody<AddKeysForSessionReqBody>,
    depot: &mut Depot,
) -> JsonResult<KeyBackupData> {
    let authed = depot.authed_info()?;
    let body = body.into_inner();
    // Writes are only allowed against the latest backup version.
    let keys_version = key_backup::get_latest_room_keys_version(authed.user_id())?
        .ok_or(MatrixError::not_found("Key backup does not exist."))?;
    if args.version != keys_version.version {
        return Err(MatrixError::invalid_param(
            "You may only manipulate the most recently created version of the backup.",
        )
        .into());
    }
    key_backup::add_key(
        authed.user_id(),
        args.version,
        &args.room_id,
        &args.session_id,
        &body.0,
    )?;
    // json_ok(ModifyKeysResBody {
    //     count: (key_backup::count_keys(authed.user_id(), args.version)? as u32).into(),
    //     etag: key_backup::get_etag(authed.user_id(), args.version)?,
    // })
    json_ok(body.0)
}
/// #GET /_matrix/client/r0/room_keys/version/{version}
/// Get information about an existing backup.
#[endpoint]
fn get_version(
    _aa: AuthArgs,
    version: PathParam<i64>,
    depot: &mut Depot,
) -> JsonResult<VersionResBody> {
    let authed = depot.authed_info()?;
    let version = version.into_inner();
    let algorithm = key_backup::get_room_keys_version(authed.user_id(), version)?
        .ok_or(MatrixError::not_found("Key backup does not exist."))?
        .algorithm;
    json_ok(VersionResBody {
        algorithm: serde_json::from_value(algorithm)?,
        // Key count and etag let clients detect concurrent backup changes.
        count: (key_backup::count_keys(authed.user_id(), version)? as u32).into(),
        etag: key_backup::get_etag(authed.user_id(), version)?,
        version: version.to_string(),
    })
}
/// #POST /_matrix/client/r0/room_keys/version
/// Creates a new backup.
#[endpoint]
fn create_version(
    _aa: AuthArgs,
    body: JsonBody<CreateVersionReqBody>,
    depot: &mut Depot,
) -> JsonResult<CreateVersionResBody> {
    let authed = depot.authed_info()?;
    let algorithm = body.into_inner().0;
    // The freshly assigned backup version is reported back as a string.
    let backup = key_backup::create_backup(authed.user_id(), &algorithm)?;
    json_ok(CreateVersionResBody {
        version: backup.version.to_string(),
    })
}
/// #PUT /_matrix/client/r0/room_keys/version/{version}
/// Update information about an existing backup. Only `auth_data` can be modified.
#[endpoint]
fn update_version(
    _aa: AuthArgs,
    body: JsonBody<CreateVersionReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let algorithm = body.into_inner().0;
    // NOTE(review): this ignores the `{version}` path parameter and calls
    // `create_backup`, which appears to create a *new* backup version rather
    // than updating the addressed one — verify against the spec's
    // PUT /room_keys/version/{version} semantics.
    key_backup::create_backup(authed.user_id(), &algorithm)?;
    empty_ok()
}
/// #GET /_matrix/client/r0/room_keys/version
/// Get information about the latest backup version.
#[endpoint]
fn latest_version(_aa: AuthArgs, depot: &mut Depot) -> JsonResult<VersionResBody> {
    let authed = depot.authed_info()?;
    let DbRoomKeysVersion {
        version, algorithm, ..
    } = key_backup::get_latest_room_keys_version(authed.user_id())?
        .ok_or(MatrixError::not_found("Key backup does not exist."))?;
    json_ok(VersionResBody {
        algorithm: serde_json::from_value(algorithm)?,
        // Key count and etag let clients detect concurrent backup changes.
        count: (key_backup::count_keys(authed.user_id(), version)? as u32).into(),
        etag: key_backup::get_etag(authed.user_id(), version)?,
        version: version.to_string(),
    })
}
/// #DELETE /_matrix/client/r0/room_keys/version/{version}
/// Delete an existing key backup.
///
/// - Deletes both information about the backup, as well as all key data related to the backup
#[endpoint]
fn delete_version(_aa: AuthArgs, version: PathParam<i64>, depot: &mut Depot) -> EmptyResult {
    let authed = depot.authed_info()?;
    key_backup::delete_backup(authed.user_id(), version.into_inner())?;
    empty_ok()
}
/// #DELETE /_matrix/client/r0/room_keys/keys
/// Delete the keys from the backup.
#[endpoint]
fn delete_keys(
    _aa: AuthArgs,
    version: QueryParam<i64, true>,
    depot: &mut Depot,
) -> JsonResult<ModifyKeysResBody> {
    let authed = depot.authed_info()?;
    let user_id = authed.user_id();
    let version = version.into_inner();
    key_backup::delete_all_keys(user_id, version)?;
    // Report the post-deletion count and etag of the backup.
    json_ok(ModifyKeysResBody {
        count: (key_backup::count_keys(user_id, version)? as u32).into(),
        etag: key_backup::get_etag(user_id, version)?,
    })
}
/// #DELETE /_matrix/client/r0/room_keys/keys/{room_id}
/// Delete the keys from the backup for a given room.
#[endpoint]
fn delete_room_keys(
    _aa: AuthArgs,
    args: KeysForRoomReqArgs,
    depot: &mut Depot,
) -> JsonResult<ModifyKeysResBody> {
    let authed = depot.authed_info()?;
    let user_id = authed.user_id();
    key_backup::delete_room_keys(user_id, args.version, &args.room_id)?;
    // Report the post-deletion count and etag of the backup.
    json_ok(ModifyKeysResBody {
        count: (key_backup::count_keys(user_id, args.version)? as u32).into(),
        etag: key_backup::get_etag(user_id, args.version)?,
    })
}
/// #DELETE /_matrix/client/r0/room_keys/keys/{room_id}/{session_id}
/// Delete a key from the backup.
#[endpoint]
fn delete_session_keys(
    _aa: AuthArgs,
    args: KeysForSessionReqArgs,
    depot: &mut Depot,
) -> JsonResult<ModifyKeysResBody> {
    let authed = depot.authed_info()?;
    let user_id = authed.user_id();
    key_backup::delete_room_key(user_id, args.version, &args.room_id, &args.session_id)?;
    // Report the post-deletion count and etag of the backup.
    json_ok(ModifyKeysResBody {
        count: (key_backup::count_keys(user_id, args.version)? as u32).into(),
        etag: key_backup::get_etag(user_id, args.version)?,
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/presence.rs | crates/server/src/routing/client/presence.rs | use std::time::Duration;
use salvo::oapi::extract::JsonBody;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::OwnedUserId;
use crate::core::client::presence::{PresenceResBody, SetPresenceReqBody};
use crate::room::state;
use crate::{
AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, hoops, json_ok,
};
/// Routes for reading and updating a user's presence status.
pub fn authed_router() -> Router {
    // GET is unthrottled; PUT (state changes) goes through the rate limiter.
    let set_route = Router::with_hoop(hoops::limit_rate).put(set_status);
    Router::with_path("presence/{user_id}/status")
        .get(get_status)
        .push(set_route)
}
/// #GET /_matrix/client/r0/presence/{user_id}/status
/// Gets the presence state of the given user.
///
/// - Only works if you share a room with the user
#[endpoint]
fn get_status(user_id: PathParam<OwnedUserId>, depot: &mut Depot) -> JsonResult<PresenceResBody> {
if !crate::config::get().presence.allow_local {
return Err(MatrixError::forbidden("Presence is disabled on this server", None).into());
}
let authed = depot.authed_info()?;
let sender_id = authed.user_id();
let user_id = user_id.into_inner();
if !state::user_can_see_user(sender_id, &user_id)? {
return Err(
MatrixError::unauthorized("You cannot get the presence state of this user").into(),
);
}
let content = crate::data::user::last_presence(&user_id)?.content;
json_ok(PresenceResBody {
// TODO: Should just use the presenceeventcontent type here?
status_msg: content.status_msg,
currently_active: content.currently_active,
last_active_ago: content.last_active_ago.map(Duration::from_millis),
presence: content.presence,
})
}
/// #PUT /_matrix/client/r0/presence/{user_id}/status
/// Sets the presence state of the sender user.
#[endpoint]
async fn set_status(
    _aa: AuthArgs,
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<SetPresenceReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    if !config::get().presence.allow_local {
        return Err(MatrixError::forbidden("Presence is disabled on this server", None).into());
    }
    let authed = depot.authed_info()?;
    let user_id = user_id.into_inner();
    // Users may only update their own presence.
    if authed.user_id() != user_id {
        return Err(MatrixError::forbidden(
            "You cannot set the presence state of another user",
            None,
        )
        .into());
    }
    let SetPresenceReqBody {
        presence,
        status_msg,
    } = body.into_inner();
    // `status_msg` is moved here; the previous `.clone()` was redundant since
    // the value is not used again after this call.
    crate::user::set_presence(authed.user_id(), Some(presence), status_msg, true)?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/session.rs | crates/server/src/routing/client/session.rs | use std::time::Duration;
use diesel::prelude::*;
use palpo_data::user::set_display_name;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::client::session::*;
use crate::core::client::uiaa::{AuthFlow, AuthType, UiaaInfo, UserIdentifier};
use crate::core::identifiers::*;
use crate::core::serde::CanonicalJsonValue;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::{DbUser, NewDbUser};
use crate::{
AppError, AuthArgs, DEVICE_ID_LENGTH, DepotExt, EmptyResult, JsonResult, MatrixError,
SESSION_ID_LENGTH, TOKEN_LENGTH, config, data, empty_ok, exts::*, hoops, json_ok, user, utils,
};
/// Routes that must be reachable without authentication (login flows).
pub fn public_router() -> Router {
    // NOTE(review): the literal segment "idpId" looks like it was meant to be
    // a path parameter ("{idp_id}") per the SSO spec — confirm before changing.
    let sso = Router::with_path("sso/redirect")
        .get(redirect)
        .push(Router::with_path("idpId").get(provider_url));
    let login = Router::with_path("login")
        .hoop(hoops::limit_rate)
        .get(login_types)
        .post(login)
        .push(sso);
    Router::new().push(login)
}
/// Routes that require an authenticated session: token issuance for the
/// m.login.token flow, access-token refresh, and logout.
pub fn authed_router() -> Router {
    let login_routes = Router::with_path("login")
        .hoop(hoops::limit_rate)
        .push(Router::with_path("get_token").post(get_access_token));
    let logout_routes = Router::with_path("logout")
        .post(logout)
        .push(Router::with_path("all").post(logout_all));
    Router::new()
        .push(login_routes)
        .push(Router::with_path("refresh").post(refresh_access_token))
        .push(logout_routes)
}
/// #GET /_matrix/client/r0/login
/// Get the supported login types of this server. One of these should be used as the `type` field
/// when logging in.
#[endpoint]
async fn login_types(_aa: AuthArgs) -> JsonResult<LoginTypesResBody> {
let flows = vec![LoginType::password(), LoginType::appservice()];
Ok(Json(LoginTypesResBody::new(flows)))
}
/// #POST /_matrix/client/r0/login
/// Authenticates the user and returns an access token it can use in subsequent requests.
///
/// - The user needs to authenticate using their password (or if enabled using a json web token)
/// - If `device_id` is known: invalidates old access token of that device
/// - If `device_id` is unknown: creates a new device
/// - Returns access token that is associated with the user and device
///
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
/// supported login types.
#[endpoint]
async fn login(
    body: JsonBody<LoginReqBody>,
    req: &mut Request,
    res: &mut Response,
) -> JsonResult<LoginResBody> {
    // Validate login method
    // TODO: Other login methods
    // Resolve the authenticating user from whichever login flow was supplied.
    let user_id = match &body.login_info {
        // m.login.password: verify the stored credentials for a local user.
        LoginInfo::Password(Password {
            identifier,
            password,
        }) => {
            // Only the user-id/localpart identifier form is supported here.
            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
                user_id.to_lowercase()
            } else {
                warn!("Bad login type: {:?}", &body.login_info);
                return Err(MatrixError::forbidden("Bad login type.", None).into());
            };
            let user_id = UserId::parse_with_server_name(username, &config::get().server_name)
                .map_err(|_| MatrixError::invalid_username("Username is invalid."))?;

            // if let Some(ldap) = config::enabled_ldap() {
            //     let (user_dn, is_ldap_admin) = match ldap.bind_dn.as_ref() {
            //         Some(bind_dn) if bind_dn.contains("{username}") => {
            //             (bind_dn.replace("{username}", user_id.localpart()), false)
            //         }
            //         _ => {
            //             debug!("searching user in LDAP");

            //             let dns = user::search_ldap(&user_id).await?;
            //             if dns.len() >= 2 {
            //                 return Err(MatrixError::forbidden("LDAP search returned two or more results", None).into());
            //             }

            //             if let Some((user_dn, is_admin)) = dns.first() {
            //                 (user_dn.clone(), *is_admin)
            //             } else {
            //                 let Ok(user) = data::user::get_user(&user_id)? else {
            //                     return Err(MatrixError::forbidden("user not found.", None).into());
            //                 };
            //                 if let Err(_e) = user::vertify_password(&user, password) {
            //                     res.status_code(StatusCode::FORBIDDEN); //for complement testing: TestLogin/parallel/POST_/login_wrong_password_is_rejected
            //                     return Err(MatrixError::forbidden("wrong username or password.", None).into());
            //                 }
            //                 (user_id.to_string(), false)
            //             }
            //         }
            //     };

            //     let user_id = user::auth_ldap(&user_dn, password).await.map(|()| user_id.to_owned())?;

            //     // LDAP users are automatically created on first login attempt. This is a very
            //     // common feature that can be seen on many services using a LDAP provider for
            //     // their users (synapse, Nextcloud, Jellyfin, ...).
            //     //
            //     // LDAP users are crated with a dummy password but non empty because an empty
            //     // password is reserved for deactivated accounts. The palpo password field
            //     // will never be read to login a LDAP user so it's not an issue.
            //     if !data::user::user_exists(&user_id)? {
            //         let new_user = NewDbUser {
            //             id: user_id.clone(),
            //             ty: Some("ldap".to_owned()),
            //             is_admin: false,
            //             is_guest: false,
            //             appservice_id: None,
            //             created_at: UnixMillis::now(),
            //         };
            //         let user = diesel::insert_into(users::table)
            //             .values(&new_user)
            //             .on_conflict(users::id)
            //             .do_update()
            //             .set(&new_user)
            //             .get_result::<DbUser>(&mut connect()?)?;
            //     }

            //     let is_palpo_admin = data::user::is_admin(&user_id)?;
            //     if is_ldap_admin && !is_palpo_admin {
            //         admin::make_admin(&user_id).await?;
            //     } else if !is_ldap_admin && is_palpo_admin {
            //         admin::revoke_admin(&user_id).await?;
            //     }
            // } else {
            let Ok(user) = data::user::get_user(&user_id) else {
                return Err(MatrixError::forbidden("User not found.", None).into());
            };
            if let Err(_e) = user::verify_password(&user, password) {
                res.status_code(StatusCode::FORBIDDEN); //for complement testing: TestLogin/parallel/POST_/login_wrong_password_is_rejected
                return Err(MatrixError::forbidden("Wrong username or password.", None).into());
            }
            // }
            user_id
        }
        // m.login.token: consume a short-lived login token previously issued
        // via /login/get_token.
        LoginInfo::Token(Token { token }) => {
            if !crate::config::get().login_via_existing_session {
                return Err(MatrixError::unknown("Token login is not enabled.").into());
            }
            user::take_login_token(token)?
        }
        // JWT login: validate the token and, if allowed, auto-register the user.
        LoginInfo::Jwt(info) => {
            let conf = config::get();
            let jwt_conf = conf
                .enabled_jwt()
                .ok_or_else(|| MatrixError::unknown("JWT login is not enabled."))?;
            let claim = user::session::validate_jwt_token(jwt_conf, &info.token)?;
            let local = claim.sub.to_lowercase();
            let user_id =
                UserId::parse_with_server_name(local, &conf.server_name).map_err(|e| {
                    MatrixError::invalid_username(format!(
                        "JWT subject is not a valid user MXID: {e}"
                    ))
                })?;
            if !data::user::user_exists(&user_id)? {
                if !jwt_conf.register_user {
                    return Err(
                        MatrixError::not_found("user is not registered on this server.").into(),
                    );
                }
                // Upsert so concurrent first logins don't race on the insert.
                let new_user = NewDbUser {
                    id: user_id.clone(),
                    ty: Some("jwt".to_owned()),
                    is_admin: false,
                    is_guest: false,
                    is_local: user_id.server_name().is_local(),
                    localpart: user_id.localpart().to_string(),
                    server_name: user_id.server_name().to_owned(),
                    appservice_id: None,
                    created_at: UnixMillis::now(),
                };
                let user = diesel::insert_into(users::table)
                    .values(&new_user)
                    .on_conflict(users::id)
                    .do_update()
                    .set(&new_user)
                    .get_result::<DbUser>(&mut connect()?)?;
                // Set initial user profile
                if let Err(e) = set_display_name(&user.id, user.id.localpart()) {
                    tracing::warn!("failed to set profile for new user (non-fatal): {}", e);
                }
            }
            user_id
        }
        // m.login.application_service: the appservice asserts the user;
        // token checks happen in the auth layer.
        LoginInfo::Appservice(Appservice { identifier }) => {
            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
                user_id.to_lowercase()
            } else {
                return Err(MatrixError::forbidden("Bad login type.", None).into());
            };
            UserId::parse_with_server_name(username, &config::get().server_name)
                .map_err(|_| MatrixError::invalid_username("Username is invalid."))?
        }
        _ => {
            warn!("Unsupported or unknown login type: {:?}", &body.login_info);
            return Err(MatrixError::unknown("Unsupported login type.").into());
        }
    };

    // Generate new device id if the user didn't specify one
    let device_id = body
        .device_id
        .clone()
        .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());

    // Generate a new token for the device
    let access_token = utils::random_string(TOKEN_LENGTH);

    // Optionally issue a refresh token (MSC2918-style refreshable sessions).
    let (refresh_token, refresh_token_id) = if body.refresh_token {
        let refresh_token = utils::random_string(TOKEN_LENGTH);
        let expires_at = UnixMillis::now().get() + crate::config::get().refresh_token_ttl;
        let ultimate_session_expires_at =
            UnixMillis::now().get() + crate::config::get().session_ttl;
        let refresh_token_id = data::user::device::set_refresh_token(
            &user_id,
            &device_id,
            &refresh_token,
            expires_at,
            ultimate_session_expires_at,
        )?;
        (Some(refresh_token), Some(refresh_token_id))
    } else {
        (None, None)
    };

    // Determine if device_id was provided and exists in the db for this user
    // Existing device: rotate its token; otherwise register a new device.
    if data::user::device::is_device_exists(&user_id, &device_id)? {
        data::user::device::set_access_token(
            &user_id,
            &device_id,
            &access_token,
            refresh_token_id,
        )?;
    } else {
        data::user::device::create_device(
            &user_id,
            &device_id,
            &access_token,
            body.initial_device_display_name.clone(),
            Some(req.remote_addr().to_string()),
        )?;
    }
    tracing::info!("{} logged in", user_id);

    json_ok(LoginResBody {
        user_id,
        access_token,
        device_id,
        well_known: None,
        refresh_token,
        expires_in: None,
    })
}
/// # `POST /_matrix/client/v1/login/get_token`
///
/// Allows a logged-in user to get a short-lived token which can be used
/// to log in with the m.login.token flow.
///
/// <https://spec.matrix.org/v1.13/client-server-api/#post_matrixclientv1loginget_token>
#[endpoint]
async fn get_access_token(
    _aa: AuthArgs,
    req: &mut Request,
    depot: &mut Depot,
) -> JsonResult<TokenResBody> {
    let conf = crate::config::get();
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let device_id = authed.device_id();
    if !conf.login_via_existing_session {
        return Err(
            MatrixError::forbidden("login via an existing session is not enabled", None).into(),
        );
    }

    // This route SHOULD have UIA
    // TODO: How do we make only UIA sessions that have not been used before valid?
    // Password re-authentication is the only accepted UIA stage.
    let mut uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: None,
        session: None,
        auth_error: None,
    };

    let payload = req.payload().await?;
    let body = serde_json::from_slice::<TokenReqBody>(payload);
    // If the client supplied UIA auth data, try to complete the flow now.
    if let Ok(Some(auth)) = body.as_ref().map(|b| &b.auth) {
        let (worked, uiaa_info) = crate::uiaa::try_auth(sender_id, device_id, auth, &uiaa_info)?;
        if !worked {
            return Err(AppError::Uiaa(uiaa_info));
        }
    } else if let Ok(json) = serde_json::from_slice::<CanonicalJsonValue>(payload) {
        // No auth data yet: open a fresh UIA session and ask the client to
        // complete it (standard 401 + UIA response).
        uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
        let _ = crate::uiaa::create_session(sender_id, device_id, &uiaa_info, json);
        return Err(AppError::Uiaa(uiaa_info));
    } else {
        return Err(MatrixError::not_json("No JSON body was sent when required.").into());
    }

    // UIA passed: mint the short-lived m.login.token value.
    let login_token = utils::random_string(TOKEN_LENGTH);
    let expires_in = crate::user::create_login_token(sender_id, &login_token)?;

    json_ok(TokenResBody {
        expires_in: Duration::from_millis(expires_in),
        login_token,
    })
}
/// #POST /_matrix/client/r0/logout
/// Log out the current device.
///
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[endpoint]
async fn logout(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // Logging out without a valid session is treated as success (idempotent).
    if let Ok(authed) = depot.authed_info() {
        data::user::device::remove_device(authed.user_id(), authed.device_id())?;
    }
    empty_ok()
}
/// #POST /_matrix/client/r0/logout/all
/// Log out all devices of this user.
///
/// - Invalidates all access tokens
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets all to-device events
/// - Triggers device list updates
///
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
/// from each device of this user.
#[endpoint]
async fn logout_all(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // Logging out without a valid session is treated as success (idempotent).
    if let Ok(authed) = depot.authed_info() {
        data::user::remove_all_devices(authed.user_id())?;
    }
    empty_ok()
}
/// #POST /_matrix/client/v3/refresh
/// Exchanges a valid refresh token for a new access/refresh token pair.
#[endpoint]
async fn refresh_access_token(
    _aa: AuthArgs,
    body: JsonBody<RefreshTokenReqBody>,
    depot: &mut Depot,
) -> JsonResult<RefreshTokenResBody> {
    let authed = depot.authed_info()?;
    let user_id = authed.user_id();
    let device_id = authed.device_id();

    crate::user::valid_refresh_token(user_id, device_id, &body.refresh_token)?;

    // Check the device up front so we never persist a fresh refresh token for
    // a device that no longer exists (previously the token row was written
    // before this check, leaving an orphan on the not-found path).
    if !data::user::device::is_device_exists(user_id, device_id)? {
        return Err(MatrixError::not_found("Device not found.").into());
    }

    let access_token = utils::random_string(TOKEN_LENGTH);
    let refresh_token = utils::random_string(TOKEN_LENGTH);
    let conf = crate::config::get();
    let now = UnixMillis::now().get();
    let expires_at = now + conf.refresh_token_ttl;
    let ultimate_session_expires_at = now + conf.session_ttl;
    let refresh_token_id = data::user::device::set_refresh_token(
        user_id,
        device_id,
        &refresh_token,
        expires_at,
        ultimate_session_expires_at,
    )?;
    data::user::device::set_access_token(
        user_id,
        device_id,
        &access_token,
        Some(refresh_token_id),
    )?;

    json_ok(RefreshTokenResBody {
        access_token,
        refresh_token: Some(refresh_token),
        // The new access token lives for exactly one refresh-token TTL.
        expires_in_ms: Some(Duration::from_millis(conf.refresh_token_ttl)),
    })
}
/// `GET /_matrix/client/*/login/sso/redirect` — begin an SSO login.
/// Stub: SSO is not implemented; currently responds with an empty body.
#[endpoint]
async fn redirect(_aa: AuthArgs, _redirect_url: QueryParam<String>) -> EmptyResult {
    // TODO: todo
    empty_ok()
}
/// `GET /_matrix/client/*/login/sso/redirect/{idpId}` — redirect via a
/// specific identity provider. Stub: responds with an empty body.
#[endpoint]
async fn provider_url(_aa: AuthArgs) -> EmptyResult {
    // TODO: todo
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/third_party.rs | crates/server/src/routing/client/third_party.rs | use salvo::prelude::*;
use crate::core::third_party::*;
use crate::{AuthArgs, JsonResult, json_ok};
/// Third-party lookup routes (`/thirdparty/...`) for protocols, locations
/// and users, all requiring authentication.
pub fn authed_router() -> Router {
    let location_routes = Router::with_path("location")
        .get(locations)
        .push(Router::with_path("{protocol}").get(protocol_locations));
    let user_routes = Router::with_path("user")
        .get(users)
        .push(Router::with_path("{protocol}").get(protocol_users));
    Router::with_path("thirdparty")
        .push(Router::with_path("protocols").get(protocols))
        .push(Router::with_path("protocol").get(protocol))
        .push(location_routes)
        .push(user_routes)
}
/// #GET /_matrix/client/r0/thirdparty/protocols
/// TODO: Fetches all metadata about protocols supported by the homeserver.
/// Stub: no bridge protocols are tracked yet, so an empty map is returned.
#[endpoint]
async fn protocols(_aa: AuthArgs) -> JsonResult<ProtocolsResBody> {
    // TODO: LATER
    json_ok(ProtocolsResBody::default())
}
/// GET /thirdparty/protocol — metadata for a single protocol.
/// Stub: always answers `null` until protocol support lands.
#[endpoint]
async fn protocol(_aa: AuthArgs) -> JsonResult<Option<ProtocolResBody>> {
    // TODO: LATER
    json_ok(None)
}
/// GET /thirdparty/location — look up third-party locations.
/// Stub: returns an empty list.
#[endpoint]
async fn locations(_aa: AuthArgs) -> JsonResult<LocationsResBody> {
    // TODO: LATER
    json_ok(LocationsResBody::default())
}
/// GET /thirdparty/location/{protocol} — locations for one protocol.
/// Stub: returns an empty list.
#[endpoint]
async fn protocol_locations(_aa: AuthArgs) -> JsonResult<LocationsResBody> {
    // TODO: LATER
    json_ok(LocationsResBody::default())
}
/// GET /thirdparty/user — look up third-party users.
/// Stub: returns an empty list.
#[endpoint]
async fn users(_aa: AuthArgs) -> JsonResult<UsersResBody> {
    // TODO: LATER
    json_ok(UsersResBody::default())
}
/// GET /thirdparty/user/{protocol} — users for one protocol.
/// Stub: returns an empty list.
#[endpoint]
async fn protocol_users(_aa: AuthArgs) -> JsonResult<UsersResBody> {
    // TODO: LATER
    json_ok(UsersResBody::default())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/appservice.rs | crates/server/src/routing/client/appservice.rs | use salvo::oapi::extract::{JsonBody, PathParam};
use salvo::prelude::*;
use crate::core::OwnedTransactionId;
use crate::core::appservice::ping::{SendPingReqBody, SendPingResBody, send_ping_request};
use crate::{AuthArgs, DepotExt, JsonResult, MatrixError, json_ok};
/// Authenticated route: `POST /appservice/{appservice_id}/ping`.
pub fn authed_router() -> Router {
    Router::with_path("appservice/{appservice_id}/ping").post(ping)
}
/// `POST /_matrix/client/v1/appservice/{appservice_id}/ping`
///
/// Lets an appservice ask the homeserver to ping its registered URL and
/// reports the round-trip duration. Only the appservice itself may call
/// this for its own registration.
#[endpoint]
async fn ping(
    _aa: AuthArgs,
    appservice_id: PathParam<OwnedTransactionId>,
    body: JsonBody<SendPingReqBody>,
    depot: &mut Depot,
) -> JsonResult<SendPingResBody> {
    let appservice_id = appservice_id.into_inner();
    let body = body.into_inner();
    let authed = depot.authed_info()?;

    let Some(appservice) = authed.appservice.as_ref() else {
        return Err(MatrixError::forbidden(
            "This endpoint can only be called by appservices.",
            None,
        )
        .into());
    };
    if appservice_id != appservice.registration.id {
        return Err(MatrixError::forbidden(
            "Appservices can only ping themselves (wrong appservice ID).",
            None,
        )
        .into());
    }

    // Collapse the previous two-step validation (`is_none() || is_some_and(...)`
    // followed by a second `if let Some(url)`) into a single check: treat a
    // missing, empty, or literal "null" URL as "nothing to ping".
    let Some(url) = appservice
        .registration
        .url
        .as_ref()
        .filter(|url| !url.is_empty() && url.as_str() != "null")
    else {
        return Err(MatrixError::url_not_set(
            "Appservice does not have a URL set, there is nothing to ping.",
        )
        .into());
    };

    // Time the whole round trip to the appservice.
    let timer = tokio::time::Instant::now();
    let request = send_ping_request(
        url,
        SendPingReqBody {
            transaction_id: body.transaction_id.clone(),
        },
    )?
    .into_inner();
    crate::sending::send_appservice_request::<()>(appservice.registration.clone(), request)
        .await?;

    json_ok(SendPingResBody::new(timer.elapsed()))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/media.rs | crates/server/src/routing/client/media.rs | use std::fs;
use std::io::Cursor;
use std::path::Path;
use std::str::FromStr;
use diesel::prelude::*;
use image::imageops::FilterType;
use mime::Mime;
use reqwest::Url;
use salvo::fs::NamedFile;
use salvo::http::header::CONTENT_TYPE;
use salvo::http::{HeaderValue, ResBody};
use salvo::prelude::*;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use uuid::Uuid;
use crate::core::UnixMillis;
use crate::core::client::media::*;
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::media::{DbMetadata, DbThumbnail, NewDbMetadata, NewDbThumbnail};
use crate::data::schema::*;
use crate::media::*;
use crate::{
AppResult, AuthArgs, EmptyResult, JsonResult, MatrixError, config, empty_ok, exts::*, hoops,
json_ok, utils,
};
/// Media routes that carry their own auth hoops (access token or federation
/// signatures for downloads; rate-limited access-token auth for the rest).
pub fn self_auth_router() -> Router {
    Router::with_path("media")
        .oapi_tag("client")
        .push(
            Router::with_path("download/{server_name}/{media_id}")
                .hoop(hoops::auth_by_access_token_or_signatures)
                .get(get_content)
                // The trailing segment is the caller-chosen download filename
                // consumed by `get_content_with_filename` (was a corrupted
                // literal path segment).
                .push(Router::with_path("{filename}").get(get_content_with_filename)),
        )
        .push(
            Router::with_hoop(hoops::limit_rate)
                .hoop(hoops::auth_by_access_token)
                .push(Router::with_path("config").get(get_config))
                .push(Router::with_path("preview_url").get(preview_url))
                .push(Router::with_path("thumbnail/{server_name}/{media_id}").get(get_thumbnail)),
        )
}
/// #GET /_matrix/media/r0/download/{server_name}/{media_id}
/// Load media from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
#[endpoint]
pub async fn get_content(
    args: ContentReqArgs,
    req: &mut Request,
    res: &mut Response,
) -> AppResult<()> {
    if let Some(metadata) = crate::data::media::get_metadata(&args.server_name, &args.media_id)? {
        // Prefer the stored content type; otherwise guess from the stored
        // file name; fall back to application/octet-stream.
        let content_type = metadata
            .content_type
            .as_deref()
            .and_then(|c| Mime::from_str(c).ok())
            .unwrap_or_else(|| {
                metadata
                    .file_name
                    .as_ref()
                    .map(|name| mime_infer::from_path(name).first_or_octet_stream())
                    .unwrap_or(mime::APPLICATION_OCTET_STREAM)
            });
        let path = get_media_path(&args.server_name, &args.media_id);
        if Path::new(&path).exists() {
            // Serve the on-disk file, attaching the original filename as the
            // download name when one is known.
            if let Some(file_name) = &metadata.file_name {
                NamedFile::builder(path).attached_name(file_name)
            } else {
                NamedFile::builder(path)
            }
            .content_type(content_type)
            .send(req.headers(), res)
            .await;
            Ok(())
        } else {
            // Metadata row exists but the file is missing from disk.
            Err(MatrixError::not_yet_uploaded("Media has not been uploaded yet").into())
        }
    } else if *args.server_name != config::get().server_name && args.allow_remote {
        // Unknown locally: fetch from the remote origin server over federation.
        let mxc = format!("mxc://{}/{}", args.server_name, args.media_id);
        fetch_remote_content(&mxc, &args.server_name, &args.media_id, res).await
    } else {
        Err(MatrixError::not_yet_uploaded("Media has not been uploaded yet").into())
    }
}
/// #GET /_matrix/media/r0/download/{server_name}/{media_id}/{file_name}
/// Load media from our server or over federation, permitting desired filename.
///
/// - Only allows federation if `allow_remote` is true
#[endpoint]
pub async fn get_content_with_filename(
    args: ContentWithFileNameReqArgs,
    req: &mut Request,
    res: &mut Response,
) -> AppResult<()> {
    let Some(metadata) = crate::data::media::get_metadata(&args.server_name, &args.media_id)?
    else {
        return Err(MatrixError::not_yet_uploaded("Media has not been uploaded yet").into());
    };

    // Prefer the stored content type; otherwise guess from the stored file
    // name; fall back to application/octet-stream.
    let content_type = if let Some(content_type) = metadata.content_type.as_deref() {
        content_type.to_owned()
    } else {
        metadata
            .file_name
            .as_ref()
            .map(|name| mime_infer::from_path(name).first_or_octet_stream())
            .unwrap_or(mime::APPLICATION_OCTET_STREAM)
            .to_string()
    };
    if let Ok(content_type) = content_type.parse::<HeaderValue>() {
        res.headers_mut().insert(CONTENT_TYPE, content_type);
    }
    let path = get_media_path(&args.server_name, &args.media_id);
    if Path::new(&path).exists() {
        // Serve the file with the caller-requested download filename.
        let file = NamedFile::builder(path)
            .content_type(
                metadata
                    .content_type
                    .as_deref()
                    .and_then(|c| Mime::from_str(c).ok())
                    .unwrap_or(mime::APPLICATION_OCTET_STREAM),
            )
            .attached_name(args.filename)
            .build()
            .await?;
        // if let Some(Ok(content_disposition)) = metadata.content_disposition.as_deref().map(HeaderValue::from_str) {
        //     file.set_content_disposition(content_disposition);
        // }
        file.send(req.headers(), res).await;
        Ok(())
    } else if *args.server_name != config::get().server_name && args.allow_remote {
        // Unknown locally: fetch from the remote origin server over federation.
        let mxc = format!("mxc://{}/{}", args.server_name, args.media_id);
        fetch_remote_content(&mxc, &args.server_name, &args.media_id, res).await
    } else {
        Err(MatrixError::not_yet_uploaded("Media has not been uploaded yet").into())
    }
}
#[endpoint]
pub fn create_mxc_uri(_aa: AuthArgs) -> JsonResult<CreateMxcUriResBody> {
let media_id = utils::random_string(crate::MXC_LENGTH);
let mxc = format!("mxc://{}/{}", config::get().server_name, media_id);
Ok(Json(CreateMxcUriResBody {
content_uri: OwnedMxcUri::from(mxc),
unused_expires_at: None,
}))
}
/// #POST /_matrix/media/r0/upload
/// Permanently save media in the server.
///
/// - Some metadata will be saved in the database
/// - Media will be saved in the media/ directory
#[endpoint]
pub async fn create_content(
    _aa: AuthArgs,
    args: CreateContentReqArgs,
    req: &mut Request,
    _depot: &mut Depot,
) -> JsonResult<CreateContentResBody> {
    // let authed = depot.take_authed_info()?;
    let conf = config::get();
    let file_name = args.filename.clone();
    let file_extension = file_name.as_deref().map(utils::fs::get_file_ext);

    // Propagate payload read errors (e.g. over-size uploads) instead of
    // panicking — the previous `.unwrap()` would bring down the request task.
    let payload = req
        .payload_with_max_size(conf.max_upload_size as usize)
        .await?;

    // Random, collision-resistant media ID.
    let media_id = utils::base32_crockford(Uuid::new_v4().as_bytes());
    let mxc = Mxc {
        server_name: &conf.server_name,
        media_id: &media_id,
    };

    let dest_path = get_media_path(&conf.server_name, &media_id);
    if dest_path.exists() {
        return Err(MatrixError::cannot_overwrite_media("Media ID already has content").into());
    }

    // Persist the bytes to disk, creating parent directories as needed.
    let parent_dir = utils::fs::get_parent_dir(&dest_path);
    fs::create_dir_all(&parent_dir)?;
    let mut file = File::create(dest_path).await?;
    file.write_all(payload).await?;

    let metadata = NewDbMetadata {
        media_id: media_id.clone(),
        origin_server: conf.server_name.clone(),
        disposition_type: Some("inline".into()),
        content_type: args.content_type.clone(),
        file_name,
        file_extension,
        file_size: payload.len() as i64,
        file_hash: None,
        created_by: None,
        created_at: UnixMillis::now(),
    };
    crate::data::media::insert_metadata(&metadata)?;
    //TODO: thumbnail support

    json_ok(CreateContentResBody {
        content_uri: mxc.to_string().into(),
        blurhash: None,
    })
}
/// #PUT /_matrix/media/*/upload/{server_name}/{media_id}
/// Upload media to an MXC URI that was created with create_mxc_uri.
#[endpoint]
pub async fn upload_content(
    _aa: AuthArgs,
    args: UploadContentReqArgs,
    req: &mut Request,
    _depot: &mut Depot,
) -> EmptyResult {
    // let authed = depot.take_authed_info()?;
    let conf = crate::config::get();
    let file_name = args.filename.clone();
    let file_extension = file_name.as_deref().map(utils::fs::get_file_ext);

    // Propagate payload read errors (e.g. over-size uploads) instead of
    // panicking — the previous `.unwrap()` would bring down the request task.
    let payload = req
        .payload_with_max_size(conf.max_upload_size as usize)
        .await?;

    let dest_path = get_media_path(&conf.server_name, &args.media_id);
    let dest_path = Path::new(&dest_path);
    if dest_path.exists() {
        return Err(MatrixError::cannot_overwrite_media("Media ID already has content").into());
    }

    // Persist the bytes to disk, creating parent directories as needed.
    let parent_dir = utils::fs::get_parent_dir(dest_path);
    fs::create_dir_all(&parent_dir)?;
    let mut file = File::create(dest_path).await?;
    file.write_all(payload).await?;

    let metadata = NewDbMetadata {
        media_id: args.media_id.clone(),
        origin_server: conf.server_name.clone(),
        // Actually interpolate the client-supplied filename into the
        // Content-Disposition; the closure argument was previously unused
        // and a corrupted literal was stored instead.
        disposition_type: args
            .filename
            .clone()
            .map(|filename| format!(r#"inline; filename="{filename}""#)),
        content_type: args.content_type.clone(),
        file_name,
        file_extension,
        file_size: payload.len() as i64,
        file_hash: None,
        created_by: None,
        created_at: UnixMillis::now(),
    };
    crate::data::media::insert_metadata(&metadata)?;
    //TODO: thumbnail support
    empty_ok()
}
/// #GET /_matrix/media/r0/config
/// Returns max upload size.
#[endpoint]
pub async fn get_config(_aa: AuthArgs) -> JsonResult<ConfigResBody> {
    // Advertise the server's configured upload limit to clients.
    json_ok(ConfigResBody {
        upload_size: config::get().max_upload_size.into(),
    })
}
/// # `GET /_matrix/client/v1/media/preview_url`
///
/// Returns URL preview.
#[endpoint]
pub async fn preview_url(
    _aa: AuthArgs,
    args: MediaPreviewReqArgs,
    depot: &mut Depot,
) -> JsonResult<MediaPreviewResBody> {
    // Authentication is required even though the sender is otherwise unused.
    let _sender_id = depot.authed_info()?.user_id();

    let target = Url::parse(&args.url)
        .map_err(|e| MatrixError::invalid_param(format!("Requested URL is not valid: {e}")))?;

    // Respect the server's URL-preview allow list.
    if !crate::media::url_preview_allowed(&target) {
        return Err(MatrixError::forbidden("URL is not allowed to be previewed", None).into());
    }

    let preview = crate::media::get_url_preview(&target).await?;
    let body = MediaPreviewResBody::from_serialize(&preview)
        .map_err(|e| MatrixError::unknown(format!("Failed to parse URL preview: {e}")))?;
    json_ok(body)
}
//// #GET /_matrix/media/r0/thumbnail/{server_name}/{media_id}
/// Load media thumbnail from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
/// Downloads a file's thumbnail.
///
/// Here's an example on how it works:
///
/// - Client requests an image with width=567, height=567
/// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails
/// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96)
/// - Server creates the thumbnail and sends it to the user
///
/// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards.
#[endpoint]
pub async fn get_thumbnail(
_aa: AuthArgs,
args: ThumbnailReqArgs,
req: &mut Request,
res: &mut Response,
) -> AppResult<()> {
if args.server_name.is_remote() && args.allow_remote {
let origin = args.server_name.origin().await;
let mut url = Url::parse(&format!(
"{}/_matrix/media/v3/thumbnail/{}/{}",
origin, args.server_name, args.media_id
))?;
{
let mut query = url.query_pairs_mut();
query.append_pair("width", &args.width.to_string());
query.append_pair("height", &args.height.to_string());
query.append_pair("timeout_ms", &args.timeout_ms.as_millis().to_string());
}
let request = crate::sending::get(url).into_inner();
let response =
crate::sending::send_federation_request(&args.server_name, request, None).await?;
*res.headers_mut() = response.headers().clone();
let bytes = response.bytes().await?;
let thumb_path = get_media_path(
&args.server_name,
&format!("{}.{}x{}", args.media_id, args.width, args.height),
);
std::fs::create_dir_all(utils::fs::get_parent_dir(&thumb_path))?;
let mut f = File::create(&thumb_path).await?;
f.write_all(&bytes).await?;
res.body = ResBody::Once(bytes);
return Ok(());
}
match crate::data::media::get_thumbnail_by_dimension(
&args.server_name,
&args.media_id,
args.width,
args.height,
) {
Ok(Some(DbThumbnail {
id,
// content_disposition,
content_type,
..
})) => {
let thumbnail_path = get_thumbnail_path(&args.server_name, &args.media_id, id);
res.add_header("Cross-Origin-Resource-Policy", "cross-origin", true)?;
let _file = NamedFile::builder(&thumbnail_path)
.content_type(if let Some(content_type) = &content_type {
Mime::from_str(content_type)
.ok()
.unwrap_or(mime::APPLICATION_OCTET_STREAM)
} else {
mime::APPLICATION_OCTET_STREAM
})
.build()
.await?;
// if let Some(Ok(content_disposition)) = content_disposition.as_deref().map(HeaderValue::from_str) {
// file.set_content_disposition(content_disposition);
// }
return Ok(());
}
Err(e) => {
tracing::error!(error = ?e, "get_thumbnail error");
return Err(MatrixError::not_found("Media not found.").into());
}
_ => {}
}
let (width, height, crop) =
crate::media::thumbnail_properties(args.width, args.height).unwrap_or((0, 0, false)); // 0, 0 because that's the original file
if let Some(DbThumbnail {
id, content_type, ..
}) = crate::data::media::get_thumbnail_by_dimension(
&args.server_name,
&args.media_id,
width,
height,
)? {
let thumbnail_path = get_thumbnail_path(&args.server_name, &args.media_id, id);
// Using saved thumbnail
let file = NamedFile::builder(&thumbnail_path)
.content_type(if let Some(content_type) = &content_type {
Mime::from_str(content_type)
.ok()
.unwrap_or(mime::APPLICATION_OCTET_STREAM)
} else {
mime::APPLICATION_OCTET_STREAM
})
.build()
.await?;
// if let Some(Ok(content_disposition)) = content_disposition.as_deref().map(HeaderValue::from_str) {
// file.set_content_disposition(content_disposition);
// }
file.send(req.headers(), res).await;
Ok(())
} else if let Ok(Some(DbMetadata {
disposition_type: _,
content_type,
..
})) = crate::data::media::get_metadata(&args.server_name, &args.media_id)
{
// Generate a thumbnail
let image_path = get_media_path(&args.server_name, &args.media_id);
if let Ok(image) = image::open(&image_path) {
let original_width = image.width();
let original_height = image.height();
if width > original_width || height > original_height {
let file = NamedFile::builder(&image_path)
.content_type(
content_type
.as_deref()
.and_then(|c| Mime::from_str(c).ok())
.unwrap_or(mime::APPLICATION_OCTET_STREAM),
)
.build()
.await?;
// if let Some(Ok(content_disposition)) = content_disposition.as_deref().map(HeaderValue::from_str) {
// file.set_content_disposition(content_disposition);
// }
file.send(req.headers(), res).await;
return Ok(());
}
let thumbnail = if crop {
image.resize_to_fill(width, height, FilterType::CatmullRom)
} else {
let (exact_width, exact_height) = {
// Copied from image::dynimage::resize_dimensions
let ratio = u64::from(original_width) * u64::from(height);
let nratio = u64::from(width) * u64::from(original_height);
let use_width = nratio <= ratio;
let intermediate = if use_width {
u64::from(original_height) * u64::from(width) / u64::from(original_width)
} else {
u64::from(original_width) * u64::from(height) / u64::from(original_height)
};
if use_width {
if intermediate <= u64::from(u32::MAX) {
(width, intermediate as u32)
} else {
(
(u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
u32::MAX,
)
}
} else if intermediate <= u64::from(u32::MAX) {
(intermediate as u32, height)
} else {
(
u32::MAX,
(u64::from(height) * u64::from(u32::MAX) / intermediate) as u32,
)
}
};
image.thumbnail_exact(exact_width, exact_height)
};
let mut thumbnail_bytes = Vec::new();
thumbnail.write_to(
&mut Cursor::new(&mut thumbnail_bytes),
image::ImageFormat::Png,
)?;
// Save thumbnail in database so we don't have to generate it again next time
let thumbnail_id = diesel::insert_into(media_thumbnails::table)
.values(&NewDbThumbnail {
media_id: args.media_id.clone(),
origin_server: args.server_name.clone(),
content_type: Some("image/png".to_owned()),
disposition_type: None,
file_size: thumbnail_bytes.len() as i64,
width: width as i32,
height: height as i32,
resize_method: args.method.clone().unwrap_or_default().to_string(),
created_at: UnixMillis::now(),
})
.on_conflict_do_nothing()
.returning(media_thumbnails::id)
.get_result::<i64>(&mut connect()?)
.optional()?;
let thumbnail_id = if let Some(thumbnail_id) = thumbnail_id {
crate::media::save_thumbnail_file(
&args.server_name,
&args.media_id,
thumbnail_id,
&thumbnail_bytes,
)
.await?;
thumbnail_id
} else {
media_thumbnails::table
.filter(media_thumbnails::media_id.eq(&args.media_id))
.filter(media_thumbnails::width.eq(args.width as i32))
.filter(media_thumbnails::height.eq(args.height as i32))
.filter(
media_thumbnails::resize_method.eq(&args
.method
.clone()
.unwrap_or_default()
.to_string()),
)
.select(media_thumbnails::id)
.first::<i64>(&mut connect()?)?
};
let thumbnail_path =
get_thumbnail_path(&args.server_name, &args.media_id, thumbnail_id);
let file = NamedFile::builder(&thumbnail_path)
.content_type(
content_type
.as_deref()
.and_then(|c| Mime::from_str(c).ok())
.unwrap_or(mime::APPLICATION_OCTET_STREAM),
)
.build()
.await?;
// if let Some(Ok(content_disposition)) = content_disposition.as_deref().map(HeaderValue::from_str) {
// file.set_content_disposition(content_disposition);
// }
file.send(req.headers(), res).await;
Ok(())
} else {
// Couldn't parse file to generate thumbnail, send original
let file = NamedFile::builder(&image_path)
.content_type(
content_type
.as_deref()
.and_then(|c| Mime::from_str(c).ok())
.unwrap_or(mime::APPLICATION_OCTET_STREAM),
)
.build()
.await?;
// if let Some(Ok(content_disposition)) = content_disposition.as_deref().map(HeaderValue::from_str) {
// file.set_content_disposition(content_disposition);
// }
file.send(req.headers(), res).await;
Ok(())
}
} else {
Err(MatrixError::not_found("file not found").into())
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/admin.rs | crates/server/src/routing/client/admin.rs | use salvo::prelude::*;
use crate::{AuthArgs, EmptyResult, empty_ok};
/// Routes that require an authenticated (admin) caller.
pub fn authed_router() -> Router {
    // Single route: server-admin "whois" lookup for a given user ID.
    let whois_route = Router::with_path("admin/whois/{user_id}");
    whois_route.get(whois)
}
#[endpoint]
async fn whois(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement the admin whois lookup (session/connection info for
    // the user in the path). Currently a stub returning an empty success body.
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/sync_msc4186.rs | crates/server/src/routing/client/sync_msc4186.rs | use std::cmp;
use std::time::Duration;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::sync_events::v5::*;
use crate::data;
use crate::routing::prelude::*;
/// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync`
/// ([MSC4186])
///
/// A simplified version of sliding sync ([MSC3575]).
///
/// Get all new events in a sliding window of rooms since the last sync or a
/// given point in time.
///
/// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575
/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186
// Implementation note: `since_sn == 0` (no `pos` token, or one that fails to
// parse) is treated as an initial sync and drops cached connection state.
#[handler]
pub(super) async fn sync_events_v5(
    _aa: AuthArgs,
    args: SyncEventsReqArgs,
    req_body: JsonBody<SyncEventsReqBody>,
    depot: &mut Depot,
) -> JsonResult<SyncEventsResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let device_id = authed.device_id();
    // `pos` is an opaque token encoding the last-seen stream sequence number.
    let since_sn: i64 = args
        .pos
        .as_ref()
        .and_then(|string| string.parse().ok())
        .unwrap_or_default();
    let mut req_body = req_body.into_inner();
    if since_sn == 0 {
        // Initial sync: discard any sticky state kept for this connection.
        crate::sync_v5::forget_sync_request_connection(
            sender_id.to_owned(),
            device_id.to_owned(),
            req_body.conn_id.to_owned(),
        );
    }
    // Get sticky parameters from cache (mutates `req_body` in place).
    let known_rooms = crate::sync_v5::update_sync_request_with_cache(
        sender_id.to_owned(),
        device_id.to_owned(),
        &mut req_body,
    );
    let mut res_body =
        crate::sync_v5::sync_events(sender_id, device_id, since_sn, &req_body, &known_rooms)
            .await?;
    if since_sn > data::curr_sn()? || (args.pos.is_some() && res_body.is_empty()) {
        // Long-poll: hang up to 30s so clients don't spam empty requests;
        // wake early when new data arrives for this user/device.
        let default = Duration::from_secs(30);
        let duration = cmp::min(args.timeout.unwrap_or(default), default);
        // Setup watchers, so if there's no response, we can wait for them
        let watcher = crate::watcher::watch(sender_id, device_id);
        _ = tokio::time::timeout(duration, watcher).await;
        res_body =
            crate::sync_v5::sync_events(sender_id, device_id, since_sn, &req_body, &known_rooms)
                .await?;
    }
    trace!(
        rooms=?res_body.rooms.len(),
        account_data=?res_body.extensions.account_data.rooms.len(),
        receipts=?res_body.extensions.receipts.rooms.len(),
        "responding to request with"
    );
    json_ok(res_body)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/auth.rs | crates/server/src/routing/client/auth.rs | use salvo::prelude::*;
use crate::{AuthArgs, EmptyResult, empty_ok};
/// Routes that require an authenticated caller.
pub fn authed_router() -> Router {
    // Web-based fallback page for a single UIAA stage, served over GET.
    let path = "auth/{auth_type}/fallback/web";
    Router::with_path(path).get(uiaa_fallback)
}
#[endpoint]
async fn uiaa_fallback(_aa: AuthArgs) -> EmptyResult {
    // TODO: serve the web fallback page for the `{auth_type}` UIAA stage.
    // Currently a stub returning an empty success body.
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/oidc.rs | crates/server/src/routing/client/oidc.rs | //! # OAuth/OIDC Authentication Module
//!
//! This module implements OAuth 2.0 Authorization Code flow with support for both
//! OpenID Connect (OIDC) providers and pure OAuth 2.0 providers for Matrix server authentication.
//!
//! ## Overview
//!
//! This authentication system allows users to log into the Matrix server using their
//! accounts from external identity providers (Google, GitHub, etc.), eliminating
//! the need for separate Matrix passwords. The implementation supports both:
//! - Standard OIDC providers (Google) with discovery endpoints
//! - Pure OAuth 2.0 providers (GitHub) with custom user info endpoints
//! Both follow the OAuth 2.0 Authorization Code flow with optional PKCE support.
//!
//! ## Authentication Flow Diagram
//!
//! ```text
//! ┌──────────┐ ┌──────────────┐ ┌──────────────────┐ ┌─────────────┐
//! │ Client │────▶│ Palpo Server │────▶│ OAuth Provider │────▶│ Database │
//! │ │ │ │ │ (Google, GitHub) │ │ │
//! └──────────┘ └──────────────┘ └──────────────────┘ └─────────────┘
//! │ │ │ │
//! │ 1. GET /oidc/auth │ │ │
//! │──────────────────▶│ │ │
//! │ │ 2. Generate state │ │
//! │ │ & PKCE challenge │ │
//! │ │ │ │
//! │ 3. Redirect to │ │ │
//! │ provider │ │ │
//! │◀──────────────────│ │ │
//! │ │ │
//! │ 4. User authenticates & grants consent │ │
//! │─────────────────────────────────────────▶│ │
//! │ │ │
//! │ 5. Callback with auth code │ │
//! │─────────────────────────────────────────▶│ │
//! │ │ 6. Exchange code │ │
//! │ │ for tokens │ │
//! │ │─────────────────────▶│ │
//! │ │ 7. Access token │ │
//! │ │◀─────────────────────│ │
//! │ │ 8. Fetch user info │ │
//! │ │─────────────────────▶│ │
//! │ │ 9. User profile data │ │
//! │ │◀─────────────────────│ │
//! │ │ 10. Create/get user │ │
//! │ │─────────────────────────────────────────────▶│
//! │ │ 11. Matrix user & │ │
//! │ │ access token │ │
//! │ │◀─────────────────────────────────────────────│
//! │ 12. Login success │ │ │
//! │◀──────────────────│ │ │
//! ```
//!
//! ## Security Features
//!
//! ### CSRF Protection
//! - Random `state` parameter generated for each auth request
//! - State stored in HTTP-only, secure cookie with short expiration
//! - State validation on callback prevents CSRF attacks
//!
//! ### PKCE (Proof Key for Code Exchange)
//! - Optional code_verifier and code_challenge for enhanced security
//! - Protects against authorization code interception attacks
//! - Especially important for mobile and SPA clients
//!
//! ### Secure Cookie Settings
//! - HTTP-only cookies prevent XSS access
//! - Secure flag ensures HTTPS-only transmission in production
//! - SameSite=Lax provides CSRF protection
//! - Short expiration (10 minutes) limits exposure window
//!
//! ## Supported Providers
//!
//! This implementation supports:
//! - **Google OAuth 2.0**: Full OIDC compliance with discovery endpoint
//! - **GitHub OAuth**: OAuth 2.0 with custom user info endpoint (not OIDC-compliant)
//! - **Generic OIDC**: Any provider with .well-known/openid-configuration
//!
//! ### Provider-specific handling:
//!
//! #### GitHub OAuth
//! - Requires `Accept: application/json` header for token exchange
//! - Requires `User-Agent` header for API requests
//! - Uses different field names (id vs sub, avatar_url vs picture)
//! - **Important**: Email may be null if user has private email settings
//!
//! #### Recommended GitHub Configuration
//! ```toml
//! [oidc]
//! user_mapping = "sub" # Use GitHub ID instead of email
//! require_email_verified = false # Allow users with private emails
//! user_prefix = "github_" # Distinguish GitHub users
//!
//! [oidc.providers.github]
//! issuer = "https://github.com"
//! scopes = ["read:user", "user:email"] # Request email access (may still be private)
//! ```
//!
//! ## User ID Generation
//!
//! Matrix user IDs combine username with provider ID for security:
//! - Ensures uniqueness even if usernames change hands
//! - Prevents account takeover when users rename on GitHub
//!
//! Examples:
//! - GitHub user "octocat" (ID 123) → `@octocat_123:server`
//! - Google user john@gmail.com → `@john_456789:server`
//! - No username/email → `@user_123456:server`
use cookie::time::Duration;
use reqwest;
use salvo::prelude::*;
use serde::{Deserialize, Serialize};
use sha2::Digest;
use std::collections::HashMap;
use url::Url;
use crate::{
AppResult, JsonResult,
config::{self, OidcProviderConfig},
core::{MatrixError, OwnedDeviceId, UnixMillis},
data,
data::user::DbUser,
exts::*,
json_ok,
};
/// OIDC session state for tracking authentication flow
///
/// This structure holds temporary data during the OAuth flow:
/// - CSRF protection via state parameter
/// - PKCE code verifier for enhanced security
/// - Provider selection for multi-provider setups
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OidcSession {
    /// CSRF protection state parameter
    pub state: String,
    /// PKCE code verifier (if PKCE is enabled)
    pub code_verifier: Option<String>,
    /// Selected provider name
    pub provider: String,
    /// Session creation timestamp (seconds since the Unix epoch)
    pub created_at: u64,
}
/// OIDC provider discovery information
///
/// Contains the well-known endpoints for an OIDC provider
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OidcProviderInfo {
    /// URL the user is redirected to for login/consent.
    pub authorization_endpoint: String,
    /// URL where the authorization code is exchanged for tokens.
    pub token_endpoint: String,
    /// URL for fetching the authenticated user's profile.
    pub userinfo_endpoint: String,
    /// Canonical issuer identifier for this provider.
    pub issuer: String,
}
/// Supported OAuth/OIDC provider types
#[derive(Debug, Clone, PartialEq)]
enum ProviderType {
    /// Google: OIDC-compliant, supports discovery.
    Google,
    /// GitHub: OAuth 2.0 only; needs provider-specific fallbacks.
    GitHub,
    /// Anything else; relies entirely on OIDC discovery.
    Generic,
}
impl ProviderType {
    /// Classify a provider by its exact issuer URL.
    ///
    /// Unrecognized issuers fall back to `Generic`.
    fn from_issuer(issuer: &str) -> Self {
        if issuer == "https://accounts.google.com" {
            Self::Google
        } else if issuer == "https://github.com" {
            Self::GitHub
        } else {
            Self::Generic
        }
    }
}
/// JWT Claims structure for OIDC
///
/// Standard ID-token claims used by this module; `exp`/`iat` are Unix
/// seconds (JWT `NumericDate`).
#[derive(Debug, Serialize, Deserialize)]
pub struct OidcClaims {
    /// Subject: the provider's stable unique user identifier.
    pub sub: String,
    /// Email address, if released by the provider.
    pub email: Option<String>,
    /// Display name, if released by the provider.
    pub name: Option<String>,
    /// Avatar/profile picture URL, if any.
    pub picture: Option<String>,
    /// Whether the provider has verified the email address.
    pub email_verified: Option<bool>,
    /// Expiration time (`exp`).
    pub exp: i64,
    /// Issued-at time (`iat`).
    pub iat: i64,
    /// Issuer (`iss`).
    pub iss: String,
    /// Audience (`aud`): the client ID the token was issued to.
    pub aud: String,
}
/// OIDC user information
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct OidcUserInfo {
    /// Provider's stable unique identifier for the user.
    pub sub: String,
    /// Email address; may be `None` (e.g. private GitHub emails).
    pub email: Option<String>,
    /// Display name, if released by the provider.
    pub name: Option<String>,
    /// Avatar/profile picture URL, if any.
    pub picture: Option<String>,
    /// Whether the provider verified the email address.
    pub email_verified: Option<bool>,
    pub preferred_username: Option<String>, // GitHub login/username
}
/// Google OAuth token response
#[derive(Debug, Deserialize)]
struct GoogleTokenResponse {
    // Bearer token used to call the userinfo endpoint.
    access_token: String,
    // Token type reported by the provider ("Bearer" per OAuth 2.0).
    token_type: String,
    // Access-token lifetime in seconds.
    expires_in: i64,
    // OIDC ID token (JWT); absent for pure OAuth providers.
    id_token: Option<String>,
}
/// Google user info response
#[derive(Debug, Deserialize)]
struct GoogleUserInfoResponse {
    // Google account ID; maps onto the OIDC `sub` claim.
    id: String,
    email: Option<String>,
    name: Option<String>,
    picture: Option<String>,
    // Google's field name for the email-verified flag.
    verified_email: Option<bool>,
}
impl From<&OidcClaims> for OidcUserInfo {
fn from(claims: &OidcClaims) -> Self {
Self {
sub: claims.sub.clone(),
email: claims.email.clone(),
name: claims.name.clone(),
picture: claims.picture.clone(),
email_verified: claims.email_verified,
preferred_username: None, // Not available in JWT claims
}
}
}
impl From<GoogleUserInfoResponse> for OidcUserInfo {
fn from(info: GoogleUserInfoResponse) -> Self {
Self {
sub: info.id,
email: info.email,
name: info.name,
picture: info.picture,
email_verified: info.verified_email,
preferred_username: None, // Not available from Google
}
}
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct OidcStatusResponse {
    /// Whether OIDC authentication is enabled on this server
    pub enabled: bool,
    /// Available providers, keyed by provider name, with display information
    pub providers: HashMap<String, OidcProviderStatus>,
    /// Default provider name (if configured)
    pub default_provider: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct OidcProviderStatus {
    /// Human-readable display name, e.g. "Sign in with Google"
    pub display_name: String,
    /// OIDC issuer URL identifying the provider
    pub issuer: String,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct OidcAuthResponse {
    /// Provider authorization URL the client should navigate to.
    pub auth_url: String,
    /// CSRF `state` value bound to this authorization attempt.
    pub state: String,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct OidcLoginResponse {
    /// Fully-qualified Matrix user ID, e.g. `@user:server`.
    pub user_id: String,
    /// Matrix access token for the new session.
    pub access_token: String,
    /// Device ID created for this login.
    pub device_id: String,
    /// Name of this homeserver.
    pub home_server: String,
}
/// `GET /_matrix/client/*/oidc/status`
///
/// **OIDC Discovery Endpoint**
///
/// Returns information about available OIDC providers and their configuration.
/// Clients use this endpoint to discover which authentication methods are available.
///
/// ## Response Format
/// ```json
/// {
/// "enabled": true,
/// "providers": {
/// "google": {
/// "display_name": "Sign in with Google",
/// "issuer": "https://accounts.google.com"
/// },
/// "github": {
/// "display_name": "Sign in with GitHub",
/// "issuer": "https://github.com"
/// }
/// },
/// "default_provider": "google"
/// }
/// ```
///
/// ## Security Note
/// This endpoint is public and doesn't require authentication to allow
/// clients to discover available authentication methods before login.
#[endpoint]
pub async fn oidc_status() -> JsonResult<OidcStatusResponse> {
    let config = config::get();
    // When OIDC is disabled, report that and expose nothing else.
    let Some(oidc_config) = config.enabled_oidc() else {
        return json_ok(OidcStatusResponse {
            enabled: false,
            providers: HashMap::new(),
            default_provider: None,
        });
    };
    // Advertise each configured provider with a human-readable label,
    // defaulting to "Sign in with <Provider>" when none is configured.
    let providers: HashMap<_, _> = oidc_config
        .providers
        .iter()
        .map(|(name, cfg)| {
            let display_name = cfg
                .display_name
                .clone()
                .unwrap_or_else(|| format!("Sign in with {}", capitalize_first(name)));
            let status = OidcProviderStatus {
                display_name,
                issuer: cfg.issuer.clone(),
            };
            (name.clone(), status)
        })
        .collect();
    json_ok(OidcStatusResponse {
        enabled: true,
        providers,
        default_provider: oidc_config.default_provider.clone(),
    })
}
/// Utility function to capitalize the first letter of a string
/// Uppercase the first character of `s`, leaving the rest untouched.
///
/// Empty input yields an empty string; characters whose uppercase form
/// expands (e.g. "ß" → "SS") are handled via `char::to_uppercase`.
fn capitalize_first(s: &str) -> String {
    let mut chars = s.chars();
    chars
        .next()
        .map(|first| first.to_uppercase().chain(chars).collect())
        .unwrap_or_default()
}
/// Generate a random string for CSRF state / PKCE verifiers.
///
/// NOTE(review): delegates to `crate::utils::random_string`; confirm that
/// helper uses a CSPRNG, since these values guard the OAuth flow.
fn generate_random_string(length: usize) -> String {
    crate::utils::random_string(length)
}
/// Generate PKCE code verifier and challenge
///
/// Returns (code_verifier, code_challenge) tuple
/// Implements proper SHA256 hashing as required by OAuth 2.0 PKCE spec
fn generate_pkce_challenge() -> (String, String) {
    use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};
    // Generate a 96-character random verifier (RFC 7636 allows 43-128 chars)
    let code_verifier = generate_random_string(96);
    // Create SHA256 hash of verifier and base64url encode it (RFC 7636)
    let mut hasher = sha2::Sha256::new();
    hasher.update(code_verifier.as_bytes());
    let hash_result = hasher.finalize();
    let code_challenge = URL_SAFE_NO_PAD.encode(&hash_result[..]);
    (code_verifier, code_challenge)
}
/// Look up a provider's configuration by name.
///
/// Fails with `not_found` when OIDC is disabled or the name is unknown.
fn get_provider_config(provider_name: &str) -> Result<&'static OidcProviderConfig, MatrixError> {
    let config = config::get();
    // Fail fast when OIDC is disabled entirely.
    let oidc_config = config
        .enabled_oidc()
        .ok_or_else(|| MatrixError::not_found("OIDC not enabled"))?;
    // Then resolve the provider by its configured name.
    match oidc_config.providers.get(provider_name) {
        Some(provider) => Ok(provider),
        None => Err(MatrixError::not_found("Unknown OIDC provider")),
    }
}
/// Discover OIDC endpoints for a provider
///
/// Attempts to fetch the .well-known/openid-configuration endpoint.
/// Falls back to common endpoint patterns for known providers.
async fn discover_provider_endpoints(
    provider_config: &OidcProviderConfig,
) -> Result<OidcProviderInfo, MatrixError> {
    let discovery_url = format!(
        "{}/.well-known/openid-configuration",
        provider_config.issuer
    );
    // NOTE(review): a fresh reqwest client (with its own connection pool) is
    // built on every call; consider a shared client if this path gets hot.
    let client = reqwest::Client::new();
    let response = client.get(&discovery_url).send().await;
    match response {
        Ok(resp) if resp.status().is_success() => {
            // Parse discovery document
            let discovery: serde_json::Value = resp.json().await.map_err(|e| {
                MatrixError::unknown(format!("Failed to parse discovery document: {}", e))
            })?;
            Ok(OidcProviderInfo {
                authorization_endpoint: discovery["authorization_endpoint"]
                    .as_str()
                    .ok_or_else(|| {
                        MatrixError::unknown("Missing authorization_endpoint in discovery")
                    })?
                    .to_string(),
                token_endpoint: discovery["token_endpoint"]
                    .as_str()
                    .ok_or_else(|| MatrixError::unknown("Missing token_endpoint in discovery"))?
                    .to_string(),
                userinfo_endpoint: discovery["userinfo_endpoint"]
                    .as_str()
                    .ok_or_else(|| MatrixError::unknown("Missing userinfo_endpoint in discovery"))?
                    .to_string(),
                // Prefer the issuer the provider reports about itself, but
                // fall back to the configured value if the field is absent.
                issuer: discovery["issuer"]
                    .as_str()
                    .unwrap_or(&provider_config.issuer)
                    .to_string(),
            })
        }
        // Network errors AND non-2xx responses both land here.
        _ => {
            // Fallback to common patterns for known providers
            let provider_type = ProviderType::from_issuer(&provider_config.issuer);
            match provider_type {
                ProviderType::Google => Ok(OidcProviderInfo {
                    authorization_endpoint: "https://accounts.google.com/o/oauth2/v2/auth"
                        .to_string(),
                    token_endpoint: "https://oauth2.googleapis.com/token".to_string(),
                    userinfo_endpoint: "https://www.googleapis.com/oauth2/v2/userinfo".to_string(),
                    issuer: provider_config.issuer.clone(),
                }),
                // GitHub is not OIDC-compliant, so discovery never succeeds;
                // these are GitHub's documented OAuth endpoints.
                ProviderType::GitHub => Ok(OidcProviderInfo {
                    authorization_endpoint: "https://github.com/login/oauth/authorize".to_string(),
                    token_endpoint: "https://github.com/login/oauth/access_token".to_string(),
                    userinfo_endpoint: "https://api.github.com/user".to_string(),
                    issuer: provider_config.issuer.clone(),
                }),
                ProviderType::Generic => Err(MatrixError::unknown(
                    "Could not discover OIDC endpoints and no fallback available",
                )),
            }
        }
    }
}
/// `GET /_matrix/client/*/oidc/auth`
///
/// **OAuth Authorization Initiation Endpoint**
///
/// Starts the OAuth 2.0 Authorization Code flow by redirecting the user to the
/// selected OIDC provider for authentication. This is step 1 of the OIDC flow.
///
/// ## Request Parameters
/// - `provider` (optional): Name of the OIDC provider to use. If not specified,
/// uses the default provider from configuration.
///
/// ## Security Features
/// - **CSRF Protection**: Generates a random `state` parameter and stores it in
/// an HTTP-only cookie for validation on callback.
/// - **PKCE Support**: Optionally generates code_verifier/code_challenge for
/// enhanced security (enabled by default).
/// - **Secure Cookies**: Uses appropriate security flags for production deployment.
///
/// ## Response
/// Redirects (302) to the OIDC provider's authorization endpoint with appropriate
/// OAuth 2.0 parameters including client_id, scopes, and security tokens.
///
/// ## Error Conditions
/// - OIDC not enabled in configuration
/// - Unknown provider specified
/// - Provider discovery/configuration failures
#[endpoint]
pub async fn oidc_auth(req: &mut Request, res: &mut Response) -> AppResult<()> {
    // Step 1: Validate OIDC configuration
    let config = config::get();
    let oidc_config = config
        .enabled_oidc()
        .ok_or_else(|| MatrixError::not_found("OIDC authentication not enabled"))?;
    // Step 2: Determine which provider to use (query param wins over default)
    let provider_name = req
        .query::<String>("provider")
        .or_else(|| oidc_config.default_provider.clone())
        .ok_or_else(|| {
            MatrixError::invalid_param("No OIDC provider specified and no default configured")
        })?;
    let provider_config = oidc_config.providers.get(&provider_name).ok_or_else(|| {
        MatrixError::not_found(format!("Unknown OIDC provider: {}", provider_name))
    })?;
    // Step 3: Discover provider endpoints
    let provider_info = discover_provider_endpoints(provider_config).await?;
    // Step 4: Generate security tokens
    let state = generate_random_string(32);
    let (code_verifier, code_challenge) = if oidc_config.enable_pkce {
        let (verifier, challenge) = generate_pkce_challenge();
        (Some(verifier), Some(challenge))
    } else {
        (None, None)
    };
    // Step 5: Create OIDC session for tracking
    let session = OidcSession {
        state: state.clone(),
        code_verifier,
        provider: provider_name.clone(),
        // unwrap: only fails if the system clock is before the Unix epoch.
        created_at: std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
    };
    // Step 6: Store session in secure cookie
    let session_data = serde_json::to_string(&session)
        .map_err(|e| MatrixError::unknown(format!("Failed to serialize OIDC session: {}", e)))?;
    // Configure cookie security based on environment
    let is_production = !cfg!(debug_assertions);
    res.add_cookie(
        salvo::http::cookie::Cookie::build(("oidc_session", session_data))
            .http_only(true)
            .secure(is_production) // HTTPS only in production
            .same_site(salvo::http::cookie::SameSite::Lax)
            .max_age(Duration::seconds(oidc_config.session_timeout as i64))
            .build(),
    );
    // Step 7: Build OAuth 2.0 authorization URL
    let mut auth_url = Url::parse(&provider_info.authorization_endpoint)
        .map_err(|e| MatrixError::unknown(format!("Invalid authorization endpoint: {}", e)))?;
    // Step 8: Add OAuth 2.0 parameters
    // Scoped block: `query_pairs_mut` mutably borrows `auth_url`, and the
    // borrow must end before `auth_url.to_string()` below.
    {
        let mut query_pairs = auth_url.query_pairs_mut();
        // Required OAuth 2.0 parameters
        query_pairs
            .append_pair("client_id", &provider_config.client_id)
            .append_pair("redirect_uri", &provider_config.redirect_uri)
            .append_pair("response_type", "code")
            .append_pair("state", &state);
        // Add requested scopes
        let scopes = provider_config.scopes.join(" ");
        query_pairs.append_pair("scope", &scopes);
        // Add PKCE challenge if enabled
        if let Some(challenge) = &code_challenge {
            query_pairs
                .append_pair("code_challenge", challenge)
                .append_pair("code_challenge_method", "S256");
        }
        // Add any additional provider-specific parameters
        for (key, value) in &provider_config.additional_params {
            query_pairs.append_pair(key, value);
        }
    }
    // NOTE(review): the byte slice below assumes `generate_random_string`
    // yields ASCII; confirm, otherwise slicing could panic mid-character.
    tracing::info!(
        "Starting OIDC authentication flow for provider '{}' with state '{}'",
        provider_name,
        &state[..8] // Log only first 8 chars for security
    );
    // Step 9: Redirect user to OIDC provider for authentication
    res.render(Redirect::found(auth_url.to_string()));
    Ok(())
}
/// `GET /_matrix/client/*/oidc/callback`
///
/// **OAuth Callback Handler - The Heart of OAuth/OIDC Authentication**
///
/// This endpoint handles the OAuth 2.0 callback from the provider after the user
/// has authenticated and granted consent. It automatically detects the provider type
/// from the session and handles provider-specific differences (Google OIDC vs GitHub OAuth).
///
/// ## Callback Flow Breakdown
/// ```text
/// 1. Validate callback parameters (code, state)
/// 2. Restore and validate session from secure cookie (includes provider info)
/// 3. Identify provider type from session for proper handling
/// 4. Exchange authorization code for access token
/// - GitHub: Requires Accept: application/json header
/// - Google: Standard token exchange
/// 5. Fetch user information from provider
/// - GitHub: API endpoint with User-Agent header, different field names
/// - Google: Standard OIDC userinfo endpoint
/// 6. Validate user according to policy (email verification, etc.)
/// 7. Create or retrieve Matrix user account
/// 8. Generate Matrix access token and device
/// 9. Return authentication credentials to client
/// ```
///
/// ## Security Validations
/// - **State Parameter**: Validates CSRF protection token
/// - **Session Timeout**: Ensures authentication session hasn't expired
/// - **PKCE Verification**: Validates code_verifier if PKCE was used
/// - **Email Verification**: Checks email_verified claim (if required)
/// - **Provider Validation**: Ensures token came from correct issuer
///
/// ## Query Parameters
/// - `code`: OAuth 2.0 authorization code from provider
/// - `state`: CSRF protection token (must match stored value)
/// - `error` (optional): Error code if authentication failed
/// - `error_description` (optional): Human-readable error description
///
/// ## Error Handling
/// Comprehensive error handling for all failure scenarios:
/// - Invalid/missing parameters → 400 Bad Request
/// - CSRF token mismatch → 403 Forbidden
/// - Session expired → 401 Unauthorized
/// - Provider communication failures → 502 Bad Gateway
/// - User creation failures → 500 Internal Server Error
#[endpoint]
pub async fn oidc_callback(req: &mut Request) -> JsonResult<OidcLoginResponse> {
    // Step 1: Handle OAuth error responses first
    if let Some(error) = req.query::<String>("error") {
        let error_description = req
            .query::<String>("error_description")
            .unwrap_or_else(|| "No description provided".to_string());
        tracing::warn!(
            "OIDC provider returned error: {} - {}",
            error,
            error_description
        );
        return Err(MatrixError::forbidden(
            format!("Authentication failed: {}", error_description),
            None,
        )
        .into());
    }
    // Step 2: Extract and validate required callback parameters
    let code = req
        .query::<String>("code")
        .ok_or_else(|| MatrixError::invalid_param("Missing authorization code in callback"))?;
    let state = req
        .query::<String>("state")
        .ok_or_else(|| MatrixError::invalid_param("Missing state parameter in callback"))?;
    // Step 3: Restore OIDC session from secure cookie
    // NOTE(review): the cookie is never cleared after a successful login;
    // consider removing it here to prevent replay within the timeout window.
    let session_cookie = req
        .cookie("oidc_session")
        .ok_or_else(|| MatrixError::unauthorized("OIDC session not found or expired"))?;
    let session: OidcSession = serde_json::from_str(session_cookie.value())
        .map_err(|e| MatrixError::unauthorized(format!("Invalid OIDC session data: {}", e)))?;
    // Step 4: Validate CSRF state parameter
    if state != session.state {
        // `state` is attacker-controlled; truncate with `chars()` rather than
        // byte slicing, which would panic on a non-ASCII char boundary.
        let received_prefix: String = state.chars().take(8).collect();
        let expected_prefix: String = session.state.chars().take(8).collect();
        tracing::warn!(
            "OIDC state mismatch: received '{}', expected '{}'",
            received_prefix,
            expected_prefix
        );
        return Err(MatrixError::forbidden("CSRF state validation failed", None).into());
    }
    // Step 5: Check session timeout
    // unwrap: only fails if the system clock is before the Unix epoch.
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let config = config::get();
    let oidc_config = config
        .enabled_oidc()
        .ok_or_else(|| MatrixError::unknown("OIDC configuration missing"))?;
    if now > session.created_at + oidc_config.session_timeout {
        return Err(MatrixError::unauthorized("OIDC session has expired").into());
    }
    // Step 6: Get provider configuration
    let provider_config = oidc_config
        .providers
        .get(&session.provider)
        .ok_or_else(|| {
            MatrixError::unknown(format!(
                "Provider '{}' no longer configured",
                session.provider
            ))
        })?;
    // Step 7: Discover provider endpoints (may be cached in production)
    let provider_info = discover_provider_endpoints(provider_config).await?;
    // Step 8: Exchange authorization code for tokens
    let token_response = exchange_code_for_tokens(
        &code,
        provider_config,
        &provider_info,
        session.code_verifier.as_deref(),
    )
    .await?;
    // Step 9: Fetch user information from provider
    let user_info = get_user_info_from_provider(
        &token_response.access_token,
        &provider_info,
        provider_config,
    )
    .await?;
    // Step 10: Validate user according to configured policies
    validate_user_info(&user_info, oidc_config)?;
    // Step 11: Generate Matrix user ID using configured mapping strategy
    let matrix_user_id =
        generate_matrix_user_id(&user_info, oidc_config, config.server_name.as_str())?;
    let display_name = generate_display_name(&user_info, provider_config);
    // Step 12: Create or retrieve Matrix user account
    let user = create_or_get_user(&matrix_user_id, &display_name, &user_info, oidc_config).await?;
    // Step 13: Create Matrix device and access token
    let device_id = format!("OIDC_{}", generate_random_string(8));
    let access_token = create_access_token_for_user(&user, &device_id).await?;
    tracing::info!(
        "OIDC authentication successful for user '{}' via provider '{}'",
        matrix_user_id,
        session.provider
    );
    // Step 14: Return Matrix authentication credentials
    json_ok(OidcLoginResponse {
        user_id: matrix_user_id,
        access_token,
        device_id,
        home_server: config.server_name.to_string(),
    })
}
/// `POST /_matrix/client/*/oidc/login`
///
/// **Direct JWT Token Authentication (Future Enhancement)**
///
/// Alternative authentication method for clients that can obtain OIDC JWT tokens
/// directly from the provider (e.g., mobile apps with native OAuth SDKs).
///
/// ## Implementation Status
/// This endpoint is planned for future implementation and would provide:
/// - Direct JWT ID token validation
/// - Mobile app integration support
/// - Reduced redirect-based flow complexity
/// - Support for native app authentication
///
/// ## Security Requirements for Future Implementation
/// - JWT signature validation against provider's public keys
/// - Issuer and audience claim validation
/// - Token expiration and not-before time checks
/// - Nonce validation for replay protection
///
/// Currently returns "not implemented" to maintain API contract.
#[endpoint]
pub async fn oidc_login(_depot: &mut Depot) -> JsonResult<OidcLoginResponse> {
    // Direct JWT validation is intentionally unimplemented; clients must use
    // the redirect-based authorization code flow started at /oidc/auth.
    let err = MatrixError::unknown(
        "Direct JWT authentication not yet implemented - use authorization code flow via /oidc/auth",
    );
    Err(err.into())
}
//
// =================== HELPER FUNCTIONS ===================
//
/// **OAuth Token Exchange - Step 2 of OAuth Flow**
///
/// Exchanges the authorization code received from the OIDC provider for an access token
/// and optionally an ID token. This is a server-to-server communication step.
///
/// ## PKCE Verification
/// If PKCE was used in the authorization request, the code_verifier is included to prove
/// that the same client that initiated the flow is completing it.
///
/// ## Security Notes
/// - Client secret is transmitted securely to provider
/// - Request is made over HTTPS only
/// - Response tokens are validated before use
async fn exchange_code_for_tokens(
code: &str,
provider_config: &OidcProviderConfig,
provider_info: &OidcProviderInfo,
code_verifier: Option<&str>,
) -> Result<OAuthTokenResponse, MatrixError> {
let client = reqwest::Client::new();
// Build token exchange request parameters
let mut params = vec![
("client_id", provider_config.client_id.as_str()),
("client_secret", provider_config.client_secret.as_str()),
("code", code),
("grant_type", "authorization_code"),
("redirect_uri", provider_config.redirect_uri.as_str()),
];
// Add PKCE verification if code_verifier is present
if let Some(verifier) = code_verifier {
params.push(("code_verifier", verifier));
}
tracing::debug!(
"Exchanging authorization code for tokens with provider: {}",
provider_info.issuer
);
// Build request with provider-specific headers
let provider_type = ProviderType::from_issuer(&provider_config.issuer);
let request = match provider_type {
ProviderType::GitHub => client
.post(&provider_info.token_endpoint)
.header("Accept", "application/json"),
_ => client.post(&provider_info.token_endpoint),
};
let response = request
.form(¶ms)
.send()
.await
.map_err(|e| MatrixError::unknown(format!("Token exchange request failed: {}", e)))?;
if !response.status().is_success() {
let status = response.status();
let error_text = response.text().await.unwrap_or_default();
tracing::error!(
"Token exchange failed with status {}: {}",
status,
error_text
);
return Err(MatrixError::unknown(format!(
"Token exchange failed: HTTP {}",
status
)));
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | true |
use palpo_core::events::push_rules::PushRulesEventContent;
use salvo::oapi::extract::JsonBody;
use salvo::prelude::*;
use crate::core::client::push::{
ConditionalReqBody, PatternedReqBody, RuleActionsResBody, RuleEnabledResBody, RuleResBody,
RulesResBody, SetRuleActionsReqBody, SetRuleEnabledReqBody, SetRuleReqArgs, SimpleReqBody,
};
use crate::core::events::GlobalAccountDataEventType;
use crate::core::push::{
InsertPushRuleError, RemovePushRuleError, RuleScope, ScopeKindRuleReqArgs,
};
use crate::core::push::{
NewConditionalPushRule, NewPatternedPushRule, NewPushRule, NewSimplePushRule, RuleKind,
};
use crate::{
DepotExt, EmptyResult, JsonResult, MatrixError, OptionalExtension, empty_ok, hoops, json_ok,
};
/// Builds the authenticated `/pushrules` sub-router.
///
/// Routes:
/// - `GET /pushrules` lists all rules (`list_rules`).
/// - `GET /pushrules/global` is a stub (`global`).
/// - `GET|DELETE /pushrules/{scope}/{kind}/{rule_id}` reads/removes a rule,
///   with `actions` and `enabled` sub-resources for per-rule tweaks.
/// - The rule-creating `PUT` is registered separately behind the
///   `limit_rate` hoop, so only that mutation is rate limited.
pub fn authed_router() -> Router {
    Router::with_path("pushrules")
        .get(list_rules)
        .push(Router::with_path("global").get(global))
        .push(
            Router::with_path("{scope}/{kind}/{rule_id}")
                .get(get_rule)
                .delete(delete_rule)
                .push(
                    Router::with_path("actions")
                        .get(get_actions)
                        .put(set_actions),
                )
                .push(
                    Router::with_path("enabled")
                        .get(get_enabled)
                        .put(set_enabled),
                ),
        )
        .push(
            Router::with_hoop(hoops::limit_rate)
                .push(Router::with_path("{scope}/{kind}/{rule_id}").put(set_rule)),
        )
}
/// `GET /pushrules/global` — not implemented yet; currently replies with an
/// empty success body.
#[endpoint]
async fn global() -> EmptyResult {
    // TODO: todo
    empty_ok()
}
/// #GET /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}
/// Retrieves a single specified push rule for this user.
#[endpoint]
fn get_rule(args: ScopeKindRuleReqArgs, depot: &mut Depot) -> JsonResult<RuleResBody> {
    let authed = depot.authed_info()?;
    // Load the account-data event that stores this user's push rules;
    // a missing event is a 404.
    let content = crate::data::user::get_global_data::<PushRulesEventContent>(
        authed.user_id(),
        &GlobalAccountDataEventType::PushRules.to_string(),
    )?
    .ok_or(MatrixError::not_found("push rule event not found."))?;
    // Look the rule up by (kind, rule_id) and convert it into the response shape.
    match content.global.get(args.kind.clone(), &args.rule_id) {
        Some(rule) => json_ok(RuleResBody { rule: rule.into() }),
        None => Err(MatrixError::not_found("push rule not found").into()),
    }
}
/// #PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}
/// Creates a single specified push rule for this user.
#[endpoint]
async fn set_rule(args: SetRuleReqArgs, req: &mut Request, depot: &mut Depot) -> EmptyResult {
    let authed = depot.authed_info()?;
    let payload = req.payload().await?;
    // The request body shape depends on the rule kind, so dispatch on the
    // kind and deserialize the raw payload into the matching variant:
    // override/underride carry conditions, sender/room are "simple" rules
    // keyed by an id that must parse as a user/room id, content carries a
    // glob pattern.
    let new_rule: NewPushRule = match &args.kind {
        RuleKind::Override => {
            let ConditionalReqBody {
                actions,
                conditions,
            } = serde_json::from_slice(payload)?;
            NewPushRule::Override(NewConditionalPushRule::new(
                args.rule_id.clone(),
                conditions,
                actions,
            ))
        }
        RuleKind::Underride => {
            let ConditionalReqBody {
                actions,
                conditions,
            } = serde_json::from_slice(payload)?;
            NewPushRule::Underride(NewConditionalPushRule::new(
                args.rule_id.clone(),
                conditions,
                actions,
            ))
        }
        RuleKind::Sender => {
            let SimpleReqBody { actions } = serde_json::from_slice(payload)?;
            // rule_id must be a valid user id for sender rules.
            let rule_id = args.rule_id.clone().try_into()?;
            NewPushRule::Sender(NewSimplePushRule::new(rule_id, actions))
        }
        RuleKind::Room => {
            let SimpleReqBody { actions } = serde_json::from_slice(payload)?;
            // rule_id must be a valid room id for room rules.
            let rule_id = args.rule_id.clone().try_into()?;
            NewPushRule::Room(NewSimplePushRule::new(rule_id, actions))
        }
        RuleKind::Content => {
            let PatternedReqBody { actions, pattern } = serde_json::from_slice(payload)?;
            NewPushRule::Content(NewPatternedPushRule::new(
                args.rule_id.clone(),
                pattern,
                actions,
            ))
        }
        _ => {
            return Err(MatrixError::invalid_param("invalid rule kind").into());
        }
    };
    // Only the `global` scope exists in the client-server API.
    if args.scope != RuleScope::Global {
        return Err(
            MatrixError::invalid_param("scopes other than 'global' are not supported").into(),
        );
    }
    // Missing account data falls back to the default rule set so a first
    // custom rule can still be inserted.
    let mut user_data_content = crate::data::user::get_data::<PushRulesEventContent>(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
    )
    .unwrap_or_default();
    // Insert relative to the optional `before`/`after` anchors; translate each
    // library error into the appropriate Matrix error code.
    if let Err(error) =
        user_data_content
            .global
            .insert(new_rule, args.after.as_deref(), args.before.as_deref())
    {
        let err = match error {
            InsertPushRuleError::ServerDefaultRuleId => MatrixError::invalid_param(
                "rule ids starting with a dot are reserved for server-default rules",
            ),
            InsertPushRuleError::InvalidRuleId => {
                MatrixError::invalid_param("rule ids containing invalid characters")
            }
            InsertPushRuleError::RelativeToServerDefaultRule => MatrixError::invalid_param(
                "can't place a push rule relatively to a server-default rule",
            ),
            InsertPushRuleError::UnknownRuleId => {
                MatrixError::not_found("The before or after rule could not be found.")
            }
            InsertPushRuleError::BeforeHigherThanAfter => MatrixError::invalid_param(
                "the before rule has a higher priority than the after rule",
            ),
            _ => MatrixError::invalid_param("invalid data"),
        };
        return Err(err.into());
    }
    // Persist the updated rule set back into global account data.
    crate::data::user::set_data(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
        serde_json::to_value(user_data_content)?,
    )?;
    empty_ok()
}
/// #DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}
/// Deletes a single specified push rule for this user.
#[endpoint]
async fn delete_rule(args: ScopeKindRuleReqArgs, depot: &mut Depot) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Only the `global` scope exists in the client-server API.
    if args.scope != RuleScope::Global {
        return Err(
            MatrixError::invalid_param("Scopes other than 'global' are not supported.").into(),
        );
    }
    // Without stored push rules there is nothing to delete — 404.
    let mut user_data_content = crate::data::user::get_global_data::<PushRulesEventContent>(
        authed.user_id(),
        &GlobalAccountDataEventType::PushRules.to_string(),
    )?
    .ok_or(MatrixError::not_found("PushRules event not found."))?;
    // Server-default rules cannot be removed; unknown rules are a 404.
    if let Err(error) = user_data_content
        .global
        .remove(args.kind.clone(), &args.rule_id)
    {
        let err = match error {
            RemovePushRuleError::ServerDefault => {
                MatrixError::invalid_param("cannot delete a server-default push rule")
            }
            RemovePushRuleError::NotFound => MatrixError::not_found("Push rule not found."),
            _ => MatrixError::invalid_param("invalid data"),
        };
        return Err(err.into());
    }
    // Persist the rule set with the rule removed.
    crate::data::user::set_data(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
        serde_json::to_value(user_data_content)?,
    )?;
    empty_ok()
}
/// #GET /_matrix/client/r0/pushrules
/// Retrieves the push rules event for this user.
#[endpoint]
async fn list_rules(depot: &mut Depot) -> JsonResult<RulesResBody> {
    let authed = depot.authed_info()?;
    let event_ty = GlobalAccountDataEventType::PushRules.to_string();
    // If the user has never customized their rules, fall back to the
    // default rule set instead of erroring.
    let content =
        crate::data::user::get_data::<PushRulesEventContent>(authed.user_id(), None, &event_ty)
            .unwrap_or_default();
    let global = content.global;
    json_ok(RulesResBody { global })
}
/// #GET /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}/actions
/// Gets the actions of a single specified push rule for this user.
#[endpoint]
async fn get_actions(
    args: ScopeKindRuleReqArgs,
    depot: &mut Depot,
) -> JsonResult<RuleActionsResBody> {
    let authed = depot.authed_info()?;
    // Only the `global` scope is defined by the spec.
    if args.scope != RuleScope::Global {
        return Err(
            MatrixError::invalid_param("scopes other than 'global' are not supported").into(),
        );
    }
    let event_ty = GlobalAccountDataEventType::PushRules.to_string();
    let content =
        crate::data::user::get_data::<PushRulesEventContent>(authed.user_id(), None, &event_ty)
            .unwrap_or_default();
    // Resolve the rule and return an owned copy of its action list.
    match content.global.get(args.kind.clone(), &args.rule_id) {
        Some(rule) => json_ok(RuleActionsResBody {
            actions: rule.actions().to_owned(),
        }),
        None => Err(MatrixError::not_found("push rule not found").into()),
    }
}
/// #PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}/actions
/// Sets the actions of a single specified push rule for this user.
#[endpoint]
fn set_actions(
    args: ScopeKindRuleReqArgs,
    body: JsonBody<SetRuleActionsReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Only the `global` scope is defined by the spec.
    if args.scope != RuleScope::Global {
        return Err(
            MatrixError::invalid_param("Scopes other than 'global' are not supported.").into(),
        );
    }
    // Without a stored push-rules event there is no rule to update, so any
    // load failure is surfaced as a 404.
    let mut user_data_content = crate::data::user::get_data::<PushRulesEventContent>(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
    )
    .map_err(|_| MatrixError::not_found("push rules event not found"))?;
    if user_data_content
        .global
        .set_actions(args.kind.clone(), &args.rule_id, body.actions.clone())
        .is_err()
    {
        return Err(MatrixError::not_found("push rule not found").into());
    }
    // Persist the updated rule set. Propagate serialization errors with `?`
    // instead of panicking, consistent with set_rule/delete_rule/set_enabled.
    crate::data::user::set_data(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
        serde_json::to_value(user_data_content)?,
    )?;
    empty_ok()
}
/// #GET /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}/enabled
/// Gets the enabled status of a single specified push rule for this user.
#[endpoint]
fn get_enabled(args: ScopeKindRuleReqArgs, depot: &mut Depot) -> JsonResult<RuleEnabledResBody> {
    let authed = depot.authed_info()?;
    // Only the `global` scope is defined by the spec.
    if args.scope != RuleScope::Global {
        return Err(
            MatrixError::invalid_param("scopes other than 'global' are not supported").into(),
        );
    }
    let event_ty = GlobalAccountDataEventType::PushRules.to_string();
    let content =
        crate::data::user::get_data::<PushRulesEventContent>(authed.user_id(), None, &event_ty)?;
    // A missing rule is a 404; otherwise report its enabled flag.
    let enabled = match content.global.get(args.kind.clone(), &args.rule_id) {
        Some(rule) => rule.enabled(),
        None => return Err(MatrixError::not_found("Push rule not found.").into()),
    };
    json_ok(RuleEnabledResBody { enabled })
}
/// #PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{rule_id}/enabled
/// Sets the enabled status of a single specified push rule for this user.
#[endpoint]
fn set_enabled(
    args: ScopeKindRuleReqArgs,
    body: JsonBody<SetRuleEnabledReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Only the `global` scope exists in the client-server API.
    if args.scope != RuleScope::Global {
        return Err(
            MatrixError::invalid_param("scopes other than 'global' are not supported").into(),
        );
    }
    // NOTE(review): unlike set_actions this propagates the storage error via
    // `?` rather than mapping it to a 404 — confirm which behavior is intended.
    let mut user_data_content = crate::data::user::get_data::<PushRulesEventContent>(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
    )?;
    // Flip the rule's enabled flag; an unknown rule is a 404.
    if user_data_content
        .global
        .set_enabled(args.kind.clone(), &args.rule_id, body.enabled)
        .is_err()
    {
        return Err(MatrixError::not_found("push rule not found").into());
    }
    // Persist the updated rule set back into global account data.
    crate::data::user::set_data(
        authed.user_id(),
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
        serde_json::to_value(user_data_content)?,
    )?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
use std::collections::BTreeMap;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use ulid::Ulid;
use crate::core::device::DirectDeviceContent;
use crate::core::federation::transaction::Edu;
use crate::core::to_device::{
DeviceIdOrAllDevices, SendEventToDeviceReqArgs, SendEventToDeviceReqBody,
};
use crate::{AuthArgs, DepotExt, EmptyResult, IsRemoteOrLocal, MatrixError, data, empty_ok};
/// Builds the authenticated `PUT /sendToDevice/{event_type}/{txn_id}` route.
pub fn authed_router() -> Router {
    Router::with_path("sendToDevice/{event_type}/{txn_id}").put(send_to_device)
}
/// #PUT /_matrix/client/r0/sendToDevice/{event_type}/{txn_id}
/// Send a to-device event to a set of client devices.
///
/// Local recipients get the event written to their device inboxes; remote
/// recipients are reached via a federation `DirectToDevice` EDU. The
/// transaction id makes retries idempotent.
#[endpoint]
fn send_to_device(
    _aa: AuthArgs,
    args: SendEventToDeviceReqArgs,
    body: JsonBody<SendEventToDeviceReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Check if this is a new transaction id; a repeated txn_id from the same
    // device is treated as an already-processed request (no-op success).
    if crate::transaction_id::txn_id_exists(
        &args.txn_id,
        authed.user_id(),
        Some(authed.device_id()),
    )? {
        return empty_ok();
    }
    for (target_user_id, map) in &body.messages {
        for (target_device_id_maybe, event) in map {
            // Remote user: wrap this single message in a DirectToDevice EDU
            // and hand it to the federation sender instead of local delivery.
            if target_user_id.server_name().is_remote() {
                let mut map = BTreeMap::new();
                map.insert(target_device_id_maybe.clone(), event.clone());
                let mut messages = BTreeMap::new();
                messages.insert(target_user_id.clone(), map);
                // Fresh ULID per EDU so the destination can deduplicate.
                let message_id = Ulid::new();
                crate::sending::send_reliable_edu(
                    target_user_id.server_name(),
                    &Edu::DirectToDevice(DirectDeviceContent {
                        sender: authed.user_id().to_owned(),
                        ev_type: args.event_type.clone(),
                        message_id: message_id.to_string().into(),
                        messages,
                    }),
                    &message_id.to_string(),
                )?;
                continue;
            }
            // Local user: deliver to one named device or fan out to all of
            // the user's devices.
            match target_device_id_maybe {
                DeviceIdOrAllDevices::DeviceId(target_device_id) => {
                    data::user::device::add_to_device_event(
                        authed.user_id(),
                        target_user_id,
                        target_device_id,
                        &args.event_type.to_string(),
                        event
                            .deserialize_as()
                            .map_err(|_| MatrixError::invalid_param("Event is invalid"))?,
                    )?
                }
                DeviceIdOrAllDevices::AllDevices => {
                    for target_device_id in data::user::all_device_ids(target_user_id)? {
                        data::user::device::add_to_device_event(
                            authed.user_id(),
                            target_user_id,
                            &target_device_id,
                            &args.event_type.to_string(),
                            event
                                .deserialize_as()
                                .map_err(|_| MatrixError::invalid_param("Event is invalid"))?,
                        )?;
                    }
                }
            }
        }
    }
    // Save transaction id with empty data so retries short-circuit above.
    crate::transaction_id::add_txn_id(
        &args.txn_id,
        authed.user_id(),
        Some(authed.device_id()),
        None,
        None,
    )?;
    empty_ok()
}
/// Dehydrated-device to-device endpoint — not implemented yet; currently
/// replies with an empty success body.
#[endpoint]
pub(super) async fn for_dehydrated(_aa: AuthArgs) -> EmptyResult {
    // TODO: todo
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
use std::time::Duration;
use std::time::SystemTime;
use base64::Engine;
use hmac::{Hmac, Mac};
use salvo::prelude::*;
use sha1::Sha1;
use crate::core::UnixSeconds;
use crate::core::client::voip::TurnServerResBody;
use crate::{AuthArgs, DepotExt, JsonResult, MatrixError, config, hoops, json_ok};
type HmacSha1 = Hmac<Sha1>;
/// Builds the authenticated, rate-limited `GET /voip/turnServer` route.
pub fn authed_router() -> Router {
    Router::with_path("voip/turnServer")
        .hoop(hoops::limit_rate)
        .get(turn_server)
}
/// #GET /_matrix/client/r0/voip/turnServer
/// Returns credentials and URIs for the configured TURN server.
#[endpoint]
async fn turn_server(_aa: AuthArgs, depot: &mut Depot) -> JsonResult<TurnServerResBody> {
    let authed = depot.authed_info()?;
    let conf = config::get();
    let turn_conf = conf
        .enabled_turn()
        .ok_or_else(|| MatrixError::not_found("TURN server is not configured"))?;
    // MSC4166: return M_NOT_FOUND 404 if no TURN URIs are specified in any way
    if turn_conf.uris.is_empty() {
        return Err(MatrixError::not_found("turn_uris is empty").into());
    }
    let secret = turn_conf.secret.clone();
    let (username, password) = if secret.is_empty() {
        // No shared secret: hand out the statically configured credentials.
        (turn_conf.username.clone(), turn_conf.password.clone())
    } else {
        // Shared-secret mode: derive time-limited credentials in the TURN
        // REST API style — username is "<expiry>:<user_id>", password is
        // base64(HMAC-SHA1(secret, username)).
        let expires_at = SystemTime::now() + Duration::from_secs(turn_conf.ttl);
        let expiry = UnixSeconds::from_system_time(expires_at).expect("time is valid");
        let username = format!("{}:{}", expiry.get(), authed.user_id());
        let mut mac =
            HmacSha1::new_from_slice(secret.as_bytes()).expect("HMAC can take key of any size");
        mac.update(username.as_bytes());
        let digest = mac.finalize().into_bytes();
        let password = base64::engine::general_purpose::STANDARD.encode(digest);
        (username, password)
    };
    json_ok(TurnServerResBody {
        username,
        password,
        uris: turn_conf.uris.clone(),
        ttl: Duration::from_secs(turn_conf.ttl),
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
mod event;
pub(super) mod membership;
mod message;
mod receipt;
mod relation;
mod space;
mod state;
pub mod summary;
mod tag;
mod thread;
use std::cmp::max;
use std::collections::BTreeMap;
pub(crate) use membership::knock_room;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde_json::json;
use serde_json::value::to_raw_value;
use ulid::Ulid;
use crate::core::UnixMillis;
use crate::core::client::directory::{PublicRoomsFilteredReqBody, PublicRoomsReqArgs};
use crate::core::client::room::{
AliasesResBody, CreateRoomReqBody, CreateRoomResBody, CreationContent, InitialSyncReqArgs,
InitialSyncResBody, PaginationChunk, RoomPreset, SetReadMarkerReqBody, UpgradeRoomReqBody,
UpgradeRoomResBody,
};
use crate::core::directory::{PublicRoomFilter, PublicRoomsResBody, RoomNetwork};
use crate::core::events::fully_read::{FullyReadEvent, FullyReadEventContent};
use crate::core::events::receipt::{
Receipt, ReceiptEvent, ReceiptEventContent, ReceiptThread, ReceiptType,
};
use crate::core::events::room::canonical_alias::RoomCanonicalAliasEventContent;
use crate::core::events::room::create::RoomCreateEventContent;
use crate::core::events::room::guest_access::{GuestAccess, RoomGuestAccessEventContent};
use crate::core::events::room::history_visibility::{
HistoryVisibility, RoomHistoryVisibilityEventContent,
};
use crate::core::events::room::join_rule::RoomJoinRulesEventContent;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::events::room::name::RoomNameEventContent;
use crate::core::events::room::power_levels::RoomPowerLevelsEventContent;
use crate::core::events::room::tombstone::RoomTombstoneEventContent;
use crate::core::events::room::topic::RoomTopicEventContent;
use crate::core::events::{self, RoomAccountDataEventType, StateEventType, TimelineEventType};
use crate::core::identifiers::*;
use crate::core::room::{JoinRule, Visibility};
use crate::core::room_version_rules::{AuthorizationRules, RoomIdFormatVersion, RoomVersionRules};
use crate::core::serde::{CanonicalJsonObject, JsonValue, RawJson};
use crate::core::state::events::RoomCreateEvent;
use crate::event::PduBuilder;
use crate::room::{push_action, timeline};
use crate::user::user_is_ignored;
use crate::{
AppResult, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, RoomMutexGuard, config,
data, empty_ok, hoops, json_ok, room,
};
/// Hard upper bound on the number of events returned in one pagination chunk.
const LIMIT_MAX: usize = 100;
/// Unauthenticated room routes: only `GET /rooms/{room_id}/initialSync`.
pub fn public_router() -> Router {
    Router::with_path("rooms").push(
        Router::with_path("{room_id}").push(Router::with_path("initialSync").get(initial_sync)),
    )
}
/// Builds the authenticated `/rooms/{room_id}/...` routing tree.
///
/// The routes are split into two `push` groups, each behind a `limit_rate`
/// hoop: membership/receipt/typing endpoints first, then the larger group
/// covering moderation, state, messages, relations, tags and events.
/// NOTE(review): both groups use the same `limit_rate` hoop — confirm whether
/// distinct rate limits were intended for the two groups.
pub fn authed_router() -> Router {
    Router::with_path("rooms")
        .push(
            Router::with_hoop(hoops::limit_rate).push(
                Router::with_path("{room_id}")
                    .push(Router::with_path("forget").post(membership::forget_room))
                    .push(Router::with_path("leave").post(membership::leave_room))
                    .push(Router::with_path("join").post(membership::join_room_by_id))
                    .push(Router::with_path("invite").post(membership::invite_user))
                    .push(Router::with_path("read_markers").post(set_read_markers))
                    .push(Router::with_path("aliases").get(get_aliases))
                    .push(Router::with_path("hierarchy").get(space::get_hierarchy))
                    .push(Router::with_path("threads").get(thread::list_threads))
                    .push(Router::with_path("typing/{user_id}").put(state::send_typing))
                    .push(
                        Router::with_path("receipt/{receipt_type}/{event_id}")
                            .post(receipt::send_receipt)
                            .put(receipt::send_receipt),
                    )
                    .push(Router::with_path("timestamp_to_event").get(event::timestamp_to_event)),
            ),
        )
        .push(
            Router::with_hoop(hoops::limit_rate).push(
                Router::with_path("{room_id}")
                    .push(Router::with_path("ban").post(membership::ban_user))
                    .push(Router::with_path("unban").post(membership::unban_user))
                    .push(Router::with_path("kick").post(membership::kick_user))
                    .push(Router::with_path("members").get(membership::get_members))
                    .push(Router::with_path("joined_members").get(membership::joined_members))
                    .push(
                        Router::with_path("state").get(state::get_state).push(
                            Router::with_path("{event_type}")
                                .put(state::send_state_for_empty_key)
                                .get(state::state_for_empty_key)
                                .push(
                                    Router::with_path("{state_key}")
                                        .put(state::send_state_for_key)
                                        .get(state::state_for_key),
                                ),
                        ),
                    )
                    .push(
                        Router::with_path("context")
                            .push(Router::with_path("{event_id}").get(event::get_context)),
                    )
                    .push(
                        Router::with_path("relations").push(
                            Router::with_path("{event_id}")
                                .get(relation::get_relation)
                                .push(
                                    Router::with_path("{rel_type}")
                                        .get(relation::get_relation_by_rel_type)
                                        .push(Router::with_path("{event_type}").get(
                                            relation::get_relation_by_rel_type_and_event_type,
                                        )),
                                ),
                        ),
                    )
                    .push(Router::with_path("upgrade").post(upgrade))
                    .push(Router::with_path("messages").get(message::get_messages))
                    .push(Router::with_path("send/{event_type}").post(message::post_message))
                    .push(
                        Router::with_path("send/{event_type}/{txn_id}").put(message::send_message),
                    )
                    .push(Router::with_path("redact/{event_id}/{txn_id}").put(event::send_redact))
                    .push(
                        Router::with_path("tags").get(tag::list_tags).push(
                            Router::with_path("{tag}")
                                .put(tag::upsert_tag)
                                .delete(tag::delete_tag),
                        ),
                    )
                    .push(
                        Router::with_path("event").push(
                            Router::with_path("{event_id}")
                                .get(event::get_room_event)
                                .post(event::report),
                        ),
                    ),
            ),
        )
}
// `#GET /_matrix/client/r0/rooms/{room_id}/initialSync`
//
// Returns a snapshot of the room for the caller: the most recent timeline
// events (up to LIMIT_MAX, loaded backwards), the current full state, the
// room's directory visibility and the caller's membership.
#[endpoint]
async fn initial_sync(
    _aa: AuthArgs,
    args: InitialSyncReqArgs,
    depot: &mut Depot,
) -> JsonResult<InitialSyncResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let room_id = &args.room_id;
    // Visibility gate: reject callers who may not see this room's events.
    if !room::state::user_can_see_events(sender_id, room_id)? {
        return Err(MatrixError::forbidden("No room preview available.", None).into());
    }
    let limit = LIMIT_MAX;
    // Events come back newest-first (backward load from the room head).
    let events =
        timeline::stream::load_pdus_backward(Some(sender_id), room_id, None, None, None, limit)?;
    // Resolve current room state from the latest state frame.
    let frame_id = room::get_frame_id(room_id, None).unwrap_or_default();
    let state: Vec<_> = room::state::get_full_state(frame_id)
        .unwrap_or_default()
        .into_values()
        .map(|event| event.to_state_event())
        .collect::<Vec<_>>();
    // Pagination tokens are the event sequence numbers at either end of the
    // batch: `start` from the oldest (last) entry, `end` from the newest.
    let messages = PaginationChunk {
        start: events
            .last()
            .map(|(sn, _)| sn)
            .as_ref()
            .map(ToString::to_string),
        end: events
            .first()
            .map(|(sn, _)| sn)
            .as_ref()
            .map(ToString::to_string)
            .unwrap_or_default(),
        chunk: events
            .into_iter()
            .map(|(_sn, event)| event.to_room_event())
            .collect(),
    };
    json_ok(InitialSyncResBody {
        room_id: room_id.to_owned(),
        account_data: None,
        state: state.into(),
        messages: if !messages.chunk.is_empty() {
            Some(messages)
        } else {
            None
        },
        visibility: room::directory::visibility(room_id).into(),
        membership: room::user::membership(sender_id, room_id).ok(),
    })
}
/// `#POST /_matrix/client/r0/rooms/{room_id}/read_markers`
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
#[endpoint]
fn set_read_markers(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<SetReadMarkerReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let room_id = room_id.into_inner();
    // `m.fully_read`: persist the marker in per-room account data and clear
    // this room's pending push actions.
    if let Some(fully_read) = &body.fully_read {
        let fully_read_event = FullyReadEvent {
            content: FullyReadEventContent {
                event_id: fully_read.clone(),
            },
        };
        crate::data::user::set_data(
            sender_id,
            Some(room_id.clone()),
            &RoomAccountDataEventType::FullyRead.to_string(),
            serde_json::to_value(fully_read_event.content).expect("to json value always works"),
        )?;
        push_action::remove_actions_for_room(sender_id, &room_id)?;
    }
    // `m.read.private`: record the private receipt locally (not federated)
    // and clear push actions up to that event.
    if let Some(event_id) = &body.private_read_receipt {
        let (event_sn, _event_guard) = crate::event::ensure_event_sn(&room_id, event_id)?;
        data::room::receipt::set_private_read(&room_id, sender_id, event_id, event_sn)?;
        push_action::remove_actions_until(sender_id, &room_id, event_sn, None)?;
        push_action::refresh_notify_summary(sender_id, &room_id)?;
    }
    // `m.read`: build a public (unthreaded) receipt event and propagate it,
    // then clear push actions up to the acknowledged event.
    if let Some(event) = &body.read_receipt {
        let mut user_receipts = BTreeMap::new();
        user_receipts.insert(
            sender_id.to_owned(),
            Receipt {
                ts: Some(UnixMillis::now()),
                thread: ReceiptThread::Unthreaded,
            },
        );
        let mut receipts = BTreeMap::new();
        receipts.insert(ReceiptType::Read, user_receipts);
        let mut receipt_content = BTreeMap::new();
        receipt_content.insert(event.to_owned(), receipts);
        room::receipt::update_read(
            sender_id,
            &room_id,
            &ReceiptEvent {
                content: ReceiptEventContent(receipt_content),
                room_id: room_id.clone(),
            },
            true,
        )?;
        let event_sn = crate::event::get_event_sn(event)?;
        push_action::remove_actions_until(sender_id, &room_id, event_sn, None)?;
        push_action::refresh_notify_summary(sender_id, &room_id)?;
    }
    empty_ok()
}
/// #GET /_matrix/client/r0/rooms/{room_id}/aliases
/// Lists all aliases of the room.
///
/// - Only users joined to the room are allowed to call this
/// TODO: Allow any user to call it if history_visibility is world readable
#[endpoint]
async fn get_aliases(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    depot: &mut Depot,
) -> JsonResult<AliasesResBody> {
    let authed = depot.authed_info()?;
    // Membership gate: only joined users may enumerate the room's aliases.
    let is_member = room::user::is_joined(authed.user_id(), &room_id)?;
    if !is_member {
        return Err(
            MatrixError::forbidden("you don't have permission to view this room", None).into(),
        );
    }
    let aliases = room::local_aliases_for_room(&room_id)?;
    json_ok(AliasesResBody { aliases })
}
/// #POST /_matrix/client/r0/rooms/{room_id}/upgrade
/// Upgrades the room.
///
/// - Creates a replacement room
/// - Sends a tombstone event into the current room
/// - Sender user joins the room
/// - Transfers some state events
/// - Moves local aliases
/// - Modifies old room power levels to prevent users from speaking
#[endpoint]
async fn upgrade(
_aa: AuthArgs,
room_id: PathParam<OwnedRoomId>,
body: JsonBody<UpgradeRoomReqBody>,
depot: &mut Depot,
) -> JsonResult<UpgradeRoomResBody> {
let authed = depot.authed_info()?;
let sender_id = authed.user_id();
let room_id = room_id.into_inner();
if !config::supported_room_versions().contains(&body.new_version) {
return Err(MatrixError::unsupported_room_version(
"This server does not support that room version.",
)
.into());
}
let conf = config::get();
let version_rules = crate::room::get_version_rules(&body.new_version)?;
// Create a replacement room
let new_room_id = if version_rules.authorization.room_create_event_id_as_room_id {
OwnedRoomId::try_from(format!("!placehold_{}", Ulid::new().to_string()))
.expect("room id should be valid")
} else {
RoomId::new_v1(&conf.server_name)
};
let state_lock = room::lock_state(&room_id).await;
room::ensure_room(&new_room_id, &body.new_version)?;
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(events::room::create::PreviousRoom::new(room_id.clone()));
// Get the old room creation event
let mut create_event_content = room::get_state_content::<CanonicalJsonObject>(
&room_id,
&StateEventType::RoomCreate,
"",
None,
)?;
if !version_rules.authorization.use_room_create_sender {
create_event_content.insert(
"creator".into(),
json!(sender_id)
.try_into()
.map_err(|_| MatrixError::bad_json("error forming creation event"))?,
);
} else {
// "creator" key no longer exists in V11+ rooms
create_event_content.remove("creator");
}
if version_rules.authorization.additional_room_creators && !body.additional_creators.is_empty()
{
create_event_content.insert(
"additional_creators".into(),
json!(&body.additional_creators)
.try_into()
.map_err(|_| MatrixError::bad_json("error forming additional_creators"))?,
);
}
create_event_content.insert(
"room_version".into(),
json!(&body.new_version)
.try_into()
.map_err(|_| MatrixError::bad_json("error forming creation event"))?,
);
create_event_content.insert(
"predecessor".into(),
json!(predecessor)
.try_into()
.map_err(|_| MatrixError::bad_json("error forming creation event"))?,
);
// Validate creation event content
let de_result = serde_json::from_str::<CanonicalJsonObject>(
to_raw_value(&create_event_content)
.expect("error forming creation event")
.get(),
);
if de_result.is_err() {
return Err(MatrixError::bad_json("error forming creation event").into());
}
let new_create_event = timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&create_event_content)
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&new_room_id,
&crate::room::get_version(&new_room_id)?,
&state_lock,
)
.await?;
// Room Version 12+ use temp room id before.
let new_room_id = new_create_event.room_id.clone();
let new_create_event = RoomCreateEvent::new(new_create_event.pdu);
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
// Fail if the sender does not have the required permissions
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTombstone,
content: to_raw_value(&RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: new_room_id.clone(),
})?,
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&crate::room::get_version(&room_id)?,
&state_lock,
)
.await?;
// // Use the m.room.tombstone event as the predecessor
// let predecessor = Some(crate::core::events::room::create::PreviousRoom::new(
// room_id.clone(),
// (*tombstone_event_id).to_owned(),
// ));
// Join the new room
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
display_name: crate::data::user::display_name(sender_id).ok().flatten(),
avatar_url: crate::data::user::avatar_url(sender_id).ok().flatten(),
is_direct: None,
third_party_invite: None,
blurhash: crate::data::user::blurhash(sender_id).ok().flatten(),
reason: None,
join_authorized_via_users_server: None,
extra_data: Default::default(),
})
.expect("event is valid, we just created it"),
state_key: Some(sender_id.to_string()),
..Default::default()
},
sender_id,
&new_room_id,
&body.new_version,
&state_lock,
)
.await?;
// Recommended transferable state events list from the specs
let transferable_state_events = vec![
StateEventType::RoomServerAcl,
StateEventType::RoomEncryption,
StateEventType::RoomName,
StateEventType::RoomAvatar,
StateEventType::RoomTopic,
StateEventType::RoomGuestAccess,
StateEventType::RoomHistoryVisibility,
StateEventType::RoomJoinRules,
StateEventType::RoomPowerLevels,
];
// Replicate transferable state events to the new room
for event_ty in transferable_state_events {
if event_ty == StateEventType::RoomPowerLevels {
continue; // Handled later
}
let event_content = match room::get_state(&room_id, &event_ty, "", None) {
Ok(v) => v.content.clone(),
_ => continue, // Skipping missing events.
};
timeline::build_and_append_pdu(
PduBuilder {
event_type: event_ty.to_string().into(),
content: event_content,
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&new_room_id,
&body.new_version,
&state_lock,
)
.await?;
}
// Moves any local aliases to the new room
for alias in room::local_aliases_for_room(&room_id)? {
room::set_alias(&new_room_id, &alias, sender_id)?;
}
// Get the old room power levels
let mut power_levels_event_content = room::get_state_content::<RoomPowerLevelsEventContent>(
&room_id,
&StateEventType::RoomPowerLevels,
"",
None,
)?;
// Setting events_default and invite to the greater of 50 and users_default + 1
let restricted_level = max(50, power_levels_event_content.users_default + 1);
if power_levels_event_content.events_default < restricted_level {
power_levels_event_content.events_default = restricted_level;
}
if power_levels_event_content.invite < restricted_level {
power_levels_event_content.invite = restricted_level;
}
// Modify the power levels in the old room to prevent sending of events and inviting new users
let _ = timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_event_content)
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&crate::room::get_version(&room_id)?,
&state_lock,
)
.await?;
if version_rules
.authorization
.explicitly_privilege_room_creators
{
let creators = new_create_event.creators()?;
for creator in &creators {
power_levels_event_content.users.remove(creator);
}
power_levels_event_content.users.remove(sender_id);
}
let _ = timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_event_content)
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&new_room_id,
&body.new_version,
&state_lock,
)
.await?;
// Return the replacement room id
json_ok(UpgradeRoomResBody {
replacement_room: new_room_id,
})
}
/// #GET /_matrix/client/r0/publicRooms
/// Lists the public rooms on this server.
///
/// Uses an empty filter and the plain Matrix network; rooms are ordered by
/// the number of joined members.
#[endpoint]
pub(super) async fn get_public_rooms(
    _aa: AuthArgs,
    args: PublicRoomsReqArgs,
) -> JsonResult<PublicRoomsResBody> {
    let filter = PublicRoomFilter::default();
    let network = RoomNetwork::Matrix;
    let res_body = crate::directory::get_public_rooms(
        args.server.as_deref(),
        args.limit,
        args.since.as_deref(),
        &filter,
        &network,
    )
    .await?;
    json_ok(res_body)
}
/// #POST /_matrix/client/r0/publicRooms
/// Lists the public rooms on this server, applying the caller-supplied
/// filter and room network.
///
/// Rooms are ordered by the number of joined members.
#[endpoint]
pub(super) async fn get_filtered_public_rooms(
    _aa: AuthArgs,
    args: JsonBody<PublicRoomsFilteredReqBody>,
) -> JsonResult<PublicRoomsResBody> {
    let res_body = crate::directory::get_public_rooms(
        args.server.as_deref(),
        args.limit,
        args.since.as_deref(),
        &args.filter,
        &args.room_network,
    )
    .await?;
    json_ok(res_body)
}
/// #POST /_matrix/client/r0/createRoom
/// Creates a new room.
///
/// - Room ID is randomly generated
/// - Create alias if room_alias_name is set
/// - Send create event
/// - Join sender user
/// - Send power levels event
/// - Send canonical room alias
/// - Send join rules
/// - Send history visibility
/// - Send guest access
/// - Send events listed in initial state
/// - Send events implied by `name` and `topic`
/// - Send invite events
#[endpoint]
pub(super) async fn create_room(
_aa: AuthArgs,
body: JsonBody<CreateRoomReqBody>,
depot: &mut Depot,
) -> JsonResult<CreateRoomResBody> {
let authed = depot.authed_info()?;
let sender_id = authed.user_id();
let conf = config::get();
// let room_version = conf.default_room_version.clone();
let room_version = match body.room_version.clone() {
Some(room_version) => {
if config::supports_room_version(&room_version) {
room_version
} else {
return Err(MatrixError::unsupported_room_version(
"this server does not support that room version",
)
.into());
}
}
None => conf.default_room_version.clone(),
};
let version_rules = crate::room::get_version_rules(&room_version)?;
if !conf.allow_room_creation && authed.appservice.is_none() && !authed.is_admin() {
return Err(MatrixError::forbidden("room creation has been disabled", None).into());
}
let alias: Option<OwnedRoomAliasId> = if let Some(localpart) = &body.room_alias_name {
// TODO: Check for invalid characters and maximum length
let alias = RoomAliasId::parse(format!("#{}:{}", localpart, &conf.server_name))
.map_err(|_| MatrixError::invalid_param("Invalid alias."))?;
if room::resolve_local_alias(&alias).is_ok() {
return Err(MatrixError::room_in_use("room alias already exists").into());
} else {
Some(alias)
}
} else {
None
};
// Figure out preset. We need it for preset specific events
let preset = body.preset.clone().unwrap_or(match &body.visibility {
Visibility::Private => RoomPreset::PrivateChat,
Visibility::Public => RoomPreset::PublicChat,
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
});
let (room_id, state_lock) = match version_rules.room_id_format {
RoomIdFormatVersion::V1 => {
create_create_event_legacy(sender_id, &body, &room_version, &version_rules).await?
}
RoomIdFormatVersion::V2 => {
create_create_event(sender_id, &body, &preset, &room_version, &version_rules).await?
}
};
// 2. Let the room creator join
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
display_name: crate::data::user::display_name(sender_id).ok().flatten(),
avatar_url: crate::data::user::avatar_url(sender_id).ok().flatten(),
is_direct: Some(body.is_direct),
third_party_invite: None,
blurhash: crate::data::user::blurhash(sender_id).ok().flatten(),
reason: None,
join_authorized_via_users_server: None,
extra_data: Default::default(),
})
.expect("event is valid, we just created it"),
state_key: Some(sender_id.to_string()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
// 3. Power levels
let mut users = BTreeMap::new();
if !version_rules
.authorization
.explicitly_privilege_room_creators
{
users.insert(sender_id.to_owned(), 100);
if preset == RoomPreset::TrustedPrivateChat {
for invitee_id in &body.invite {
if user_is_ignored(sender_id, invitee_id) || user_is_ignored(invitee_id, sender_id)
{
continue;
}
users.insert(invitee_id.to_owned(), 100);
}
}
}
let power_levels_content = default_power_levels_content(
&version_rules.authorization,
body.preset.as_ref(),
body.power_level_content_override.as_ref(),
&body.visibility,
users,
)?;
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&power_levels_content)?,
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
// 4. Canonical room alias
if let Some(room_alias_id) = &alias {
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(room_alias_id.to_owned()),
alt_aliases: vec![],
})
.expect("we checked that alias earlier, it must be fine"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await
.unwrap();
}
// 5. Events set by preset
// 5.1 Join Rules
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
RoomPreset::PublicChat => JoinRule::Public,
// according to spec "invite" is the default
_ => JoinRule::Invite,
}))
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
// 5.2 History Visibility
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
))
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
// 5.3 Guest Access
// timeline::build_and_append_pdu(
// PduBuilder {
// event_type: TimelineEventType::RoomGuestAccess,
// content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
// RoomPreset::PublicChat => GuestAccess::Forbidden,
// _ => GuestAccess::CanJoin,
// }))
// .expect("event is valid, we just created it"),
// state_key: Some("".to_owned()),
// ..Default::default()
// },
// sender_id,
// &room_id,
// &state_lock,
// )?;
if preset != RoomPreset::PublicChat {
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::CanJoin))
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
}
// 6. Events listed in initial_state
for event in &body.initial_state {
let mut pdu_builder = event.deserialize_as::<PduBuilder>().map_err(|e| {
warn!("Invalid initial state event: {:?}", e);
MatrixError::invalid_param("Invalid initial state event.")
})?;
// Implicit state key defaults to ""
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
// Silently skip encryption events if they are not allowed
if pdu_builder.event_type == TimelineEventType::RoomEncryption && !conf.allow_encryption {
continue;
}
timeline::build_and_append_pdu(
pdu_builder,
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
}
// 7. Events implied by name and topic
if let Some(name) = &body.name {
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
}
if let Some(topic) = &body.topic {
timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent::new(topic.clone()))
.expect("event is valid, we just created it"),
state_key: Some("".to_owned()),
..Default::default()
},
sender_id,
&room_id,
&room_version,
&state_lock,
)
.await?;
}
drop(state_lock);
// 8. Events implied by invite (and TODO: invite_3pid)
for user_id in &body.invite {
if let Err(e) =
crate::membership::invite_user(sender_id, user_id, &room_id, None, body.is_direct).await
{
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | true |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/directory.rs | crates/server/src/routing/client/directory.rs | mod alias;
mod room;
use salvo::prelude::*;
/// Routes for the unauthenticated directory endpoints: alias resolution and
/// room-visibility lookup.
pub fn public_router() -> Router {
    let alias_route = Router::with_path("room/{room_alias}").get(alias::get_alias);
    let visibility_route = Router::with_path("list/room/{room_id}").get(room::get_visibility);
    Router::with_path("directory")
        .push(alias_route)
        .push(visibility_route)
}
/// Routes for the authenticated directory endpoints: alias management and
/// room-visibility management (including the appservice variant).
pub fn authed_router() -> Router {
    let alias_route = Router::with_path("room/{room_alias}")
        .put(alias::upsert_alias)
        .delete(alias::delete_alias);
    let appservice_visibility = Router::with_path("appservice/{network_id}/{room_id}")
        .put(room::set_visibility_with_network_id);
    let room_visibility = Router::with_path("room/{room_id}")
        .get(room::get_visibility)
        .put(room::set_visibility);
    let list_route = Router::with_path("list")
        .push(appservice_visibility)
        .push(room_visibility);
    Router::with_path("directory")
        .push(alias_route)
        .push(list_route)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/profile.rs | crates/server/src/routing/client/profile.rs | use diesel::prelude::*;
use palpo_core::UnixMillis;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde_json::value::to_raw_value;
use crate::core::client::profile::*;
use crate::core::events::room::member::RoomMemberEventContent;
use crate::core::events::{StateEventType, TimelineEventType};
use crate::core::federation::query::{ProfileReqArgs, profile_request};
use crate::core::identifiers::*;
use crate::core::user::{ProfileField, ProfileResBody};
use crate::data::schema::*;
use crate::data::user::{DbProfile, NewDbPresence};
use crate::data::{connect, diesel_exists};
use crate::exts::*;
use crate::room::timeline;
use crate::{
AppError, AuthArgs, EmptyResult, JsonResult, MatrixError, PduBuilder, data, empty_ok, hoops,
json_ok, room,
};
/// Unauthenticated reads of a user's public profile fields.
pub fn public_router() -> Router {
    let avatar = Router::with_path("avatar_url").get(get_avatar_url);
    let name = Router::with_path("displayname").get(get_display_name);
    Router::with_path("profile/{user_id}")
        .get(get_profile)
        .push(avatar)
        .push(name)
}
/// Rate-limited, authenticated profile mutations.
pub fn authed_router() -> Router {
    let avatar = Router::with_path("avatar_url").put(set_avatar_url);
    let name = Router::with_path("displayname").put(set_display_name);
    Router::with_path("profile/{user_id}")
        .hoop(hoops::limit_rate)
        .push(avatar)
        .push(name)
}
/// #GET /_matrix/client/r0/profile/{user_id}
/// Returns the display_name, avatar_url and blurhash of the user.
///
/// - If user is on another server: Fetches profile over federation
#[endpoint]
async fn get_profile(_aa: AuthArgs, user_id: PathParam<OwnedUserId>) -> JsonResult<ProfileResBody> {
    let user_id = user_id.into_inner();
    let server_name = user_id.server_name().to_owned();
    if !server_name.is_valid() {
        return Err(MatrixError::not_found("profile not found").into());
    }

    // Remote user: proxy the profile lookup over federation.
    if user_id.is_remote() {
        let request = profile_request(
            &server_name.origin().await,
            ProfileReqArgs {
                user_id,
                field: None,
            },
        )?
        .into_inner();
        let profile = crate::sending::send_federation_request(&server_name, request, Some(5))
            .await?
            .json::<ProfileResBody>()
            .await?;
        return json_ok(profile);
    }

    // Local user: read the global (room-independent) profile row.
    let query_result = user_profiles::table
        .filter(user_profiles::user_id.eq(&user_id))
        .filter(user_profiles::room_id.is_null())
        .first::<DbProfile>(&mut connect()?);
    match query_result {
        Ok(profile) => json_ok(ProfileResBody {
            avatar_url: profile.avatar_url,
            blurhash: profile.blurhash,
            display_name: profile.display_name,
        }),
        // No stored profile: fall back to the localpart as the display name.
        Err(_) => json_ok(ProfileResBody {
            avatar_url: None,
            blurhash: None,
            display_name: Some(user_id.localpart().to_owned()),
        }),
    }
}
/// #GET /_matrix/client/r0/profile/{user_id}/avatar_url
/// Returns the avatar_url and blurhash of the user.
///
/// - If user is on another server: Fetches avatar_url and blurhash over federation
/// - For local users, reads only the global profile row (`room_id IS NULL`),
///   never a per-room profile override — consistent with `get_profile` and
///   `set_avatar_url`.
#[endpoint]
async fn get_avatar_url(
    _aa: AuthArgs,
    user_id: PathParam<OwnedUserId>,
) -> JsonResult<AvatarUrlResBody> {
    let user_id = user_id.into_inner();
    if user_id.is_remote() {
        // Remote user: proxy the lookup to the user's homeserver.
        let server_name = user_id.server_name().to_owned();
        let request = profile_request(
            &server_name.origin().await,
            ProfileReqArgs {
                user_id,
                field: Some(ProfileField::AvatarUrl),
            },
        )?
        .into_inner();
        let body: AvatarUrlResBody =
            crate::sending::send_federation_request(&server_name, request, None)
                .await?
                .json::<AvatarUrlResBody>()
                .await?;
        return json_ok(body);
    }
    // Local user. The `room_id.is_null()` filter was missing here, so the
    // query could pick up a per-room profile row instead of the global one.
    let DbProfile {
        avatar_url,
        blurhash,
        ..
    } = user_profiles::table
        .filter(user_profiles::user_id.eq(&user_id))
        .filter(user_profiles::room_id.is_null())
        .first::<DbProfile>(&mut connect()?)?;
    json_ok(AvatarUrlResBody {
        avatar_url,
        blurhash,
    })
}
/// #PUT /_matrix/client/r0/profile/{user_id}/avatar_url
/// Updates the avatar_url and blurhash.
///
/// - Only the authenticated user may update their own avatar (403 otherwise).
/// - Requires an existing global profile row; returns 404 if none exists.
/// - Also makes sure other users receive the update using presence EDUs
#[endpoint]
async fn set_avatar_url(
    _aa: AuthArgs,
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<SetAvatarUrlReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let user_id = user_id.into_inner();
    let authed = depot.authed_info()?;
    // Users may only modify their own avatar.
    if authed.user_id() != user_id {
        return Err(MatrixError::forbidden("forbidden", None).into());
    }
    let SetAvatarUrlReqBody {
        avatar_url,
        blurhash,
    } = body.into_inner();
    // Target the user's global profile row (room_id NULL), not any
    // per-room profile override.
    let query = user_profiles::table
        .filter(user_profiles::user_id.eq(&user_id))
        .filter(user_profiles::room_id.is_null());
    let profile_exists = diesel_exists!(query, &mut connect()?)?;
    if profile_exists {
        // `treat_none_as_null = true`: a `None` avatar_url/blurhash clears
        // the stored column rather than leaving it untouched.
        #[derive(AsChangeset, Debug)]
        #[diesel(table_name = user_profiles, treat_none_as_null = true)]
        struct UpdateParams {
            avatar_url: Option<OwnedMxcUri>,
            blurhash: Option<String>,
        }
        let updata_params = UpdateParams {
            avatar_url: avatar_url.clone(),
            blurhash,
        };
        diesel::update(query)
            .set(updata_params)
            .execute(&mut connect()?)?;
    } else {
        return Err(StatusError::not_found().brief("Profile not found.").into());
    }
    // Send a new membership event and presence update into all joined rooms.
    // Rooms whose current member state cannot be loaded are silently skipped
    // (the map produces Err, which filter_map drops below).
    let all_joined_rooms: Vec<_> = data::user::joined_rooms(&user_id)?
        .into_iter()
        .map(|room_id| {
            Ok::<_, AppError>((
                PduBuilder {
                    event_type: TimelineEventType::RoomMember,
                    content: to_raw_value(&RoomMemberEventContent {
                        avatar_url: avatar_url.clone(),
                        // Preserve all other fields of the user's current
                        // membership state in this room.
                        ..room::get_state_content::<RoomMemberEventContent>(
                            &room_id,
                            &StateEventType::RoomMember,
                            user_id.as_str(),
                            None,
                        )?
                    })
                    .expect("event is valid, we just created it"),
                    state_key: Some(user_id.to_string()),
                    ..Default::default()
                },
                room_id,
            ))
        })
        .filter_map(|r| r.ok())
        .collect();
    // Presence update (done once, before fanning out membership events).
    crate::data::user::set_presence(
        NewDbPresence {
            user_id: user_id.clone(),
            stream_id: None,
            state: None,
            status_msg: None,
            last_active_at: Some(UnixMillis::now()),
            last_federation_update_at: None,
            last_user_sync_at: None,
            currently_active: None,
            occur_sn: None,
        },
        true,
    )?;
    for (pdu_builder, room_id) in all_joined_rooms {
        let _ = timeline::build_and_append_pdu(
            pdu_builder,
            &user_id,
            &room_id,
            &room::get_version(&room_id)?,
            &room::lock_state(&room_id).await,
        )
        .await?;
    }
    empty_ok()
}
/// #GET /_matrix/client/r0/profile/{user_id}/displayname
/// Returns the display_name of the user.
///
/// - If user is on another server: Fetches display_name over federation
#[endpoint]
async fn get_display_name(
    _aa: AuthArgs,
    user_id: PathParam<OwnedUserId>,
) -> JsonResult<DisplayNameResBody> {
    let user_id = user_id.into_inner();
    if !user_id.is_remote() {
        // Local user: read straight from our own data store.
        let display_name = data::user::display_name(&user_id).ok().flatten();
        return json_ok(DisplayNameResBody { display_name });
    }
    // Remote user: proxy the lookup over federation.
    let server_name = user_id.server_name().to_owned();
    let request = profile_request(
        &server_name.origin().await,
        ProfileReqArgs {
            user_id,
            field: Some(ProfileField::DisplayName),
        },
    )?
    .into_inner();
    let res_body = crate::sending::send_federation_request(&server_name, request, None)
        .await?
        .json::<DisplayNameResBody>()
        .await?;
    json_ok(res_body)
}
/// #PUT /_matrix/client/r0/profile/{user_id}/displayname
/// Updates the display_name.
///
/// - Only the authenticated user may update their own name (403 otherwise).
/// - Also makes sure other users receive the update using presence EDUs
#[endpoint]
async fn set_display_name(
    _aa: AuthArgs,
    user_id: PathParam<OwnedUserId>,
    body: JsonBody<SetDisplayNameReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let user_id = user_id.into_inner();
    let authed = depot.authed_info()?;
    // Users may only change their own display name.
    if authed.user_id() != user_id {
        return Err(MatrixError::forbidden("forbidden", None).into());
    }
    let SetDisplayNameReqBody { display_name } = body.into_inner();
    if let Some(display_name) = display_name.as_deref() {
        // NOTE(review): a `None` body leaves the stored name untouched rather
        // than clearing it — confirm this matches the intended unset semantics.
        data::user::set_display_name(&user_id, display_name)?;
    }
    // Prepare a membership-event update for every joined room so the new name
    // propagates; rooms whose current member state can't be read are skipped.
    let all_joined_rooms: Vec<_> = data::user::joined_rooms(&user_id)?
        .into_iter()
        .map(|room_id| {
            Ok::<_, AppError>((
                PduBuilder {
                    event_type: TimelineEventType::RoomMember,
                    content: to_raw_value(&RoomMemberEventContent {
                        display_name: display_name.clone(),
                        ..room::get_state_content::<RoomMemberEventContent>(
                            &room_id,
                            &StateEventType::RoomMember,
                            user_id.as_str(),
                            None,
                        )?
                    })
                    .expect("event is valid, we just created it"),
                    state_key: Some(user_id.to_string()),
                    ..Default::default()
                },
                room_id,
            ))
        })
        .filter_map(|r| r.ok())
        .collect();
    // Presence update. Previously this ran once per joined room inside the
    // loop below (redundant repeated writes) and not at all for users in no
    // rooms; hoisted here to run exactly once, matching `set_avatar_url`.
    crate::data::user::set_presence(
        NewDbPresence {
            user_id: user_id.clone(),
            stream_id: None,
            state: None,
            status_msg: None,
            last_active_at: Some(UnixMillis::now()),
            last_federation_update_at: None,
            last_user_sync_at: None,
            currently_active: None,
            occur_sn: None,
        },
        true,
    )?;
    for (pdu_builder, room_id) in all_joined_rooms {
        let _ = timeline::build_and_append_pdu(
            pdu_builder,
            &user_id,
            &room_id,
            &crate::room::get_version(&room_id)?,
            &room::lock_state(&room_id).await,
        )
        .await?;
    }
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/pusher.rs | crates/server/src/routing/client/pusher.rs | use salvo::oapi::extract::JsonBody;
use salvo::prelude::*;
use crate::core::client::push::{PushersResBody, SetPusherReqBody};
use crate::core::push::Pusher;
use crate::data::DataError;
use crate::{DepotExt, EmptyResult, JsonResult, data, empty_ok, hoops, json_ok};
/// Authenticated pusher endpoints: list pushers and (rate-limited) set one.
pub fn authed_router() -> Router {
    let set_route =
        Router::with_hoop(hoops::limit_rate).push(Router::with_path("set").post(set_pusher));
    Router::with_path("pushers").get(pushers).push(set_route)
}
/// #GET /_matrix/client/r0/pushers
/// Gets all currently active pushers for the sender user.
#[endpoint]
async fn pushers(depot: &mut Depot) -> JsonResult<PushersResBody> {
    let authed = depot.authed_info()?;
    // Convert each stored DB row into the API-level `Pusher`, failing the
    // whole request on the first conversion error.
    let pushers = data::user::pusher::get_pushers(authed.user_id())?
        .into_iter()
        .map(TryInto::<Pusher>::try_into)
        .collect::<Result<Vec<_>, DataError>>()?;
    json_ok(PushersResBody { pushers })
}
/// #POST /_matrix/client/r0/pushers/set
/// Adds a pusher for the sender user.
///
/// - TODO: Handle `append`
#[endpoint]
async fn set_pusher(body: JsonBody<SetPusherReqBody>, depot: &mut Depot) -> EmptyResult {
    let authed = depot.authed_info()?;
    let req_body = body.into_inner();
    crate::user::pusher::set_pusher(authed, req_body.0)?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/account.rs | crates/server/src/routing/client/account.rs | mod password;
mod threepid;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::account::{
DeactivateReqBody, DeactivateResBody, ThirdPartyIdRemovalStatus, WhoamiResBody,
};
use crate::core::client::uiaa::{AuthFlow, AuthType, UiaaInfo};
use crate::{
AuthArgs, EmptyResult, JsonResult, MatrixError, SESSION_ID_LENGTH, data, exts::*, hoops,
json_ok, utils,
};
/// Unauthenticated account endpoints: 3pid validation-token requests.
///
/// TODO: the 3pid management endpoints (get/add/bind/unbind/delete and the
/// per-medium requestToken variants) are not implemented yet.
pub fn public_router() -> Router {
    let email_route = Router::with_path("email/requestToken").post(token_via_email);
    let msisdn_route = Router::with_path("msisdn/requestToken").post(token_via_msisdn);
    Router::with_path("account")
        .push(email_route)
        .push(msisdn_route)
}
/// Authenticated account endpoints: whoami, deactivation, password and 3pid
/// sub-routers.
///
/// TODO: the 3pid management endpoints (get/add/bind/unbind/delete and the
/// per-medium requestToken variants) are not implemented yet.
pub fn authed_router() -> Router {
    let whoami_route = Router::with_path("whoami")
        .hoop(hoops::limit_rate)
        .get(whoami);
    let deactivate_route = Router::with_path("deactivate")
        .hoop(hoops::limit_rate)
        .post(deactivate);
    Router::with_path("account")
        .push(whoami_route)
        .push(deactivate_route)
        .push(password::authed_router())
        .push(threepid::authed_router())
}
/// #POST /_matrix/client/v3/account/3pid/email/requestToken
/// "This API should be used to request validation tokens when adding an email address to an account"
///
/// - 403 signals that the homeserver does not allow the third party identifier as a contact option.
#[endpoint]
async fn token_via_email(_aa: AuthArgs) -> EmptyResult {
    // This homeserver does not support third-party identifiers; always deny.
    let err = MatrixError::threepid_denied("Third party identifier is not allowed");
    Err(err.into())
}
/// #POST /_matrix/client/v3/account/3pid/msisdn/requestToken
/// "This API should be used to request validation tokens when adding a phone number to an account"
///
/// - 403 signals that the homeserver does not allow the third party identifier as a contact option.
#[endpoint]
async fn token_via_msisdn(_aa: AuthArgs) -> EmptyResult {
    // This homeserver does not support third-party identifiers; always deny.
    let err = MatrixError::threepid_denied("Third party identifier is not allowed");
    Err(err.into())
}
/// #GET _matrix/client/r0/account/whoami
///
/// Get user_id of the sender user.
///
/// Note: Also works for Application Services
#[endpoint]
async fn whoami(_aa: AuthArgs, depot: &mut Depot) -> JsonResult<WhoamiResBody> {
    let authed = depot.take_authed_info()?;
    let user_id = authed.user_id().to_owned();
    let device_id = authed.device_id().to_owned();
    json_ok(WhoamiResBody {
        user_id,
        device_id: Some(device_id),
        is_guest: false,
    })
}
/// #POST /_matrix/client/r0/account/deactivate
/// Deactivate sender user account.
///
/// Requires User-Interactive Authentication (password stage); the first call
/// without an `auth` object returns the UIAA flow with a fresh session id.
///
/// - Leaves all rooms and rejects all invitations
/// - Invalidates all access tokens
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets all to-device events
/// - Triggers device list updates
/// - Removes ability to log in again
#[endpoint]
async fn deactivate(
    _aa: AuthArgs,
    body: JsonBody<DeactivateReqBody>,
    depot: &mut Depot,
    res: &mut Response,
) -> JsonResult<DeactivateResBody> {
    let authed = depot.authed_info()?;
    // Single UIAA flow: password authentication only.
    let mut uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };
    // No auth supplied yet: start a new UIAA session and return the flow.
    let Some(auth) = &body.auth else {
        uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
        return Err(uiaa_info.into());
    };
    if crate::uiaa::try_auth(authed.user_id(), authed.device_id(), auth, &uiaa_info).is_err() {
        res.status_code(StatusCode::UNAUTHORIZED);
        return Err(MatrixError::forbidden("Authentication failed.", None).into());
    }
    // Remove devices and mark account as deactivated
    data::user::deactivate(authed.user_id())?;
    // info!("User {} deactivated their account.", authed.user_id());
    // crate::admin::send_message(RoomMessageEventContent::notice_plain(format!(
    //     "User {authed.user_id()} deactivated their account."
    // )));
    // No identity server unbinding is attempted.
    json_ok(DeactivateResBody {
        id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
    })
}
// msc3391
#[handler]
pub(super) fn delete_account_data_msc3391(
    _req: &mut Request,
    _res: &mut Response,
) -> JsonResult<()> {
    // Stub: always responds with an empty JSON success body.
    json_ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/event.rs | crates/server/src/routing/client/room/event.rs | use std::collections::HashSet;
use palpo_core::Direction;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde_json::value::to_raw_value;
use state::DbRoomStateField;
use crate::core::client::filter::LazyLoadOptions;
use crate::core::client::redact::{RedactEventReqArgs, RedactEventReqBody, RedactEventResBody};
use crate::core::client::room::{
ContextReqArgs, ContextResBody, ReportContentReqBody, RoomEventResBody,
};
use crate::core::events::room::message::RoomMessageEventContent;
use crate::core::events::room::redaction::RoomRedactionEventContent;
use crate::core::events::{StateEventType, TimelineEventType};
use crate::core::room::{RoomEventReqArgs, TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::event::fetching::fetch_event;
use crate::event::handler::{process_pulled_pdu, remote_timestamp_to_event};
use crate::event::parse_fetched_pdu;
use crate::room::{state, timeline};
use crate::utils::HtmlEscape;
use crate::{AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, empty_ok, json_ok, room};
use crate::{OptionalExtension, PduBuilder};
/// #GET /_matrix/client/r0/rooms/{room_id}/event/{event_id}
/// Gets a single event.
///
/// Rejected and outlier events, and events the requester is not allowed to
/// see, are all reported as "not found" so their existence is not leaked.
///
/// - You have to currently be joined to the room (TODO: Respect history visibility)
#[endpoint]
pub(super) fn get_room_event(
    _aa: AuthArgs,
    args: RoomEventReqArgs,
    depot: &mut Depot,
) -> JsonResult<RoomEventResBody> {
    let authed = depot.authed_info()?;

    // Rejected or outlier events are never served to clients.
    let event = DbEvent::get_by_id(&args.event_id)?;
    if event.rejection_reason.is_some() {
        warn!("event {} is rejected", &args.event_id);
        return Err(MatrixError::not_found("event not found").into());
    }
    if event.is_outlier {
        warn!("event {} is outlier", &args.event_id);
        return Err(MatrixError::not_found("event not found").into());
    }

    // Bind the PDU mutably right away; the previous `event.clone()` made a
    // redundant copy of an already-owned value.
    let mut event = timeline::get_pdu(&args.event_id)?;
    if !state::user_can_see_event(authed.user_id(), &args.event_id)? {
        return Err(MatrixError::not_found("event not found").into());
    }
    event.add_age()?;
    json_ok(RoomEventResBody::new(event.to_room_event()))
}
/// #POST /_matrix/client/r0/rooms/{room_id}/report/{event_id}
/// Reports an inappropriate event to homeserver admins
///
/// Validates the optional score (must be -100..=0) and reason (<= 250 chars),
/// then forwards a formatted summary to the admin room; delivery failures of
/// that admin message are deliberately ignored.
#[endpoint]
pub(super) async fn report(
    _aa: AuthArgs,
    args: RoomEventReqArgs,
    body: JsonBody<ReportContentReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Fetching the PDU also implicitly verifies the reported event exists.
    let pdu = timeline::get_pdu(&args.event_id)?;
    // Score is an offensiveness rating: 0 (inoffensive) down to -100.
    if let Some(true) = body.score.map(|s| !(-100..=0).contains(&s)) {
        return Err(MatrixError::invalid_param("invalid score, must be within 0 to -100").into());
    };
    if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) {
        return Err(MatrixError::invalid_param(
            "reason too long, should be 250 characters or fewer",
        )
        .into());
    };
    // Best-effort notification to the admin room (plain-text + HTML bodies);
    // errors are ignored so reporting still succeeds if delivery fails.
    let _ = crate::admin::send_message(RoomMessageEventContent::text_html(
        format!(
            "Report received from: {}\n\n\
                Event ID: {:?}\n\
                Room ID: {:?}\n\
                Sent By: {:?}\n\n\
                Report Score: {:?}\n\
                Report Reason: {:?}",
            authed.user_id(),
            pdu.event_id,
            pdu.room_id,
            pdu.sender,
            body.score,
            body.reason
        ),
        format!(
            "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
                </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
                <a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
                </li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
                Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
                </ul></details>",
            authed.user_id(),
            pdu.event_id,
            pdu.room_id,
            pdu.sender,
            body.score,
            // Escape the user-supplied reason before embedding it in HTML.
            HtmlEscape(body.reason.as_deref().unwrap_or(""))
        ),
    )).await;
    empty_ok()
}
/// #GET /_matrix/client/r0/rooms/{room_id}/context/{event_id}
/// Allows loading room history around an event.
///
/// Returns up to `limit` events split evenly before and after the base event,
/// plus the room state at the end of the returned window (member events
/// lazy-loaded when the client's filter requests it).
///
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
///   joined, depending on history_visibility)
#[endpoint]
pub(super) fn get_context(
    _aa: AuthArgs,
    args: ContextReqArgs,
    depot: &mut Depot,
) -> JsonResult<ContextResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    // Lazy-loading of member events per the client's filter; the second flag
    // controls whether members already sent to this device are re-sent anyway.
    let (lazy_load_enabled, lazy_load_send_redundant) = match &args.filter.lazy_load_options {
        LazyLoadOptions::Enabled {
            include_redundant_members,
        } => (true, *include_redundant_members),
        _ => (false, false),
    };
    // Senders whose member events must be included in the `state` section.
    let mut lazy_loaded = HashSet::new();
    let base_token = crate::event::get_live_token(&args.event_id)
        .map_err(|_| MatrixError::not_found("base event id not found"))?;
    let base_event = timeline::get_pdu(&args.event_id)?;
    let room_id = base_event.room_id.clone();
    if !state::user_can_see_event(sender_id, &args.event_id)? {
        return Err(
            MatrixError::forbidden("you don't have permission to view this event", None).into(),
        );
    }
    if !crate::room::lazy_loading::lazy_load_was_sent_before(
        sender_id,
        authed.device_id(),
        &room_id,
        &base_event.sender,
    )? || lazy_load_send_redundant
    {
        lazy_loaded.insert(base_event.sender.as_str().to_owned());
    }
    // Use limit with maximum 100
    let limit = args.limit.min(100);
    let base_event = base_event.to_room_event();
    // Events before the base event, restricted to what the sender may see.
    let events_before = timeline::stream::load_pdus_backward(
        Some(sender_id),
        &room_id,
        Some(base_token),
        None,
        None,
        limit / 2,
    )?
    .into_iter()
    .filter(|(_, pdu)| state::user_can_see_event(sender_id, &pdu.event_id).unwrap_or(false))
    .collect::<Vec<_>>();
    for (_, event) in &events_before {
        if !crate::room::lazy_loading::lazy_load_was_sent_before(
            sender_id,
            authed.device_id(),
            &room_id,
            &event.sender,
        )? || lazy_load_send_redundant
        {
            lazy_loaded.insert(event.sender.as_str().to_owned());
        }
    }
    // Pagination token for fetching further back; falls back to the base token
    // when there are no earlier events.
    let start_token = events_before
        .last()
        .map(|(_, pdu)| pdu.historic_token())
        .unwrap_or_else(|| base_token);
    let events_before = events_before
        .into_iter()
        .map(|(_, pdu)| pdu.to_room_event())
        .collect::<Vec<_>>();
    let events_after = timeline::stream::load_pdus_forward(
        Some(sender_id),
        &room_id,
        Some(base_token),
        None,
        None,
        limit / 2,
    )?;
    for (_, event) in &events_after {
        if !crate::room::lazy_loading::lazy_load_was_sent_before(
            sender_id,
            authed.device_id(),
            &room_id,
            &event.sender,
        )? || lazy_load_send_redundant
        {
            lazy_loaded.insert(event.sender.as_str().to_owned());
        }
    }
    // State is taken at the last returned event; if that frame lookup fails,
    // fall back to the room's current state frame.
    let frame_id = match state::get_pdu_frame_id(
        events_after
            .last()
            .map_or(&*args.event_id, |(_, e)| &*e.event_id),
    ) {
        Ok(s) => s,
        Err(_) => crate::room::get_frame_id(&room_id, None).unwrap_or_default(),
    };
    let state_ids = state::get_full_state_ids(frame_id).unwrap_or_default();
    // Token for paginating forward past the returned window.
    let end_token = events_after
        .last()
        .map(|(_, e)| e.live_token())
        .unwrap_or_else(|| base_token);
    let events_after: Vec<_> = events_after
        .into_iter()
        .map(|(_, pdu)| pdu.to_room_event())
        .collect();
    // Assemble the state section: non-member events always; member events only
    // when lazy loading is off or the sender appeared in the returned timeline.
    let mut state = Vec::new();
    for (field_id, event_id) in state_ids {
        let DbRoomStateField {
            event_ty,
            state_key,
            ..
        } = state::get_field(field_id)?;
        if event_ty != StateEventType::RoomMember {
            let pdu = match timeline::get_pdu(&event_id) {
                Ok(pdu) => pdu,
                Err(_) => {
                    error!("pdu in state not found: {}", event_id);
                    continue;
                }
            };
            state.push(pdu.to_state_event());
        } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
            let pdu = match timeline::get_pdu(&event_id) {
                Ok(pdu) => pdu,
                Err(_) => {
                    error!("pdu in state not found: {}", event_id);
                    continue;
                }
            };
            state.push(pdu.to_state_event());
        }
    }
    json_ok(ContextResBody {
        start: Some(start_token.to_string()),
        end: Some(end_token.to_string()),
        events_before,
        event: Some(base_event),
        events_after,
        state,
    })
}
/// #PUT /_matrix/client/r0/rooms/{room_id}/redact/{event_id}/{txn_id}
/// Tries to send a redaction event into the room.
///
/// - TODO: Handle txn id
#[endpoint]
pub(super) async fn send_redact(
    _aa: AuthArgs,
    args: RedactEventReqArgs,
    body: JsonBody<RedactEventReqBody>,
    depot: &mut Depot,
) -> JsonResult<RedactEventResBody> {
    let authed = depot.authed_info()?;
    // Serialize room-state mutations while the redaction is appended.
    let state_lock = crate::room::lock_state(&args.room_id).await;
    let room_version = crate::room::get_version(&args.room_id)?;
    let content = to_raw_value(&RoomRedactionEventContent {
        redacts: Some(args.event_id.clone()),
        reason: body.reason.clone(),
    })
    .expect("event is valid, we just created it");
    let builder = PduBuilder {
        event_type: TimelineEventType::RoomRedaction,
        content,
        redacts: Some(args.event_id),
        ..Default::default()
    };
    let appended = timeline::build_and_append_pdu(
        builder,
        authed.user_id(),
        &args.room_id,
        &room_version,
        &state_lock,
    )
    .await?;
    json_ok(RedactEventResBody {
        event_id: (*appended.pdu.event_id).to_owned(),
    })
}
/// #GET /_matrix/client/r0/rooms/{room_id}/timestamp_to_event
#[endpoint]
pub(super) async fn timestamp_to_event(
_aa: AuthArgs,
args: TimestampToEventReqArgs,
depot: &mut Depot,
) -> JsonResult<TimestampToEventResBody> {
let authed = depot.authed_info()?;
if !room::user::is_joined(authed.user_id(), &args.room_id)? {
return Err(MatrixError::forbidden("You are not joined to this room.", None).into());
}
let local_event =
crate::event::get_event_for_timestamp(&args.room_id, args.ts, args.dir).optional()?;
let mut is_event_next_to_backward_gap = false;
let mut is_event_next_to_forward_gap = false;
if let Some(local_event) = &local_event {
let local_event = timeline::get_pdu(&local_event.0)?;
match args.dir {
Direction::Backward => {
is_event_next_to_forward_gap = timeline::is_event_next_to_forward_gap(&local_event)?
}
Direction::Forward => {
is_event_next_to_backward_gap =
timeline::is_event_next_to_backward_gap(&local_event)?
}
}
}
if local_event.is_none() || is_event_next_to_backward_gap || is_event_next_to_forward_gap {
let remote_servers = room::admin_servers(&args.room_id, false)?;
let Ok((
remote_server,
TimestampToEventResBody {
event_id,
origin_server_ts,
},
)) = remote_timestamp_to_event(
&remote_servers,
&args.room_id,
args.dir,
args.ts,
local_event.as_ref(),
)
.await
else {
return if let Some((event_id, origin_server_ts)) = local_event {
json_ok(TimestampToEventResBody {
event_id,
origin_server_ts,
})
} else {
Err(StatusError::not_found().brief("no event found").into())
};
};
let room_version = crate::room::get_version(&args.room_id)?;
let Ok((event_id, event_value)) = parse_fetched_pdu(
&args.room_id,
&room_version,
&fetch_event(&remote_server, &event_id).await?.pdu,
) else {
error!("failed parse featch pdu for timestamp to event");
return json_ok(TimestampToEventResBody {
event_id,
origin_server_ts,
});
};
process_pulled_pdu(
&remote_server,
&event_id,
&args.room_id,
&room_version,
event_value,
true,
)
.await?;
return json_ok(TimestampToEventResBody {
event_id,
origin_server_ts,
});
}
if let Some((event_id, origin_server_ts)) = local_event {
json_ok(TimestampToEventResBody {
event_id,
origin_server_ts,
})
} else {
Err(StatusError::not_found().brief("no event found").into())
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/relation.rs | crates/server/src/routing/client/room/relation.rs | use salvo::prelude::*;
use crate::core::client::relation::{
RelatingEventsReqArgs, RelatingEventsWithRelTypeAndEventTypeReqArgs,
RelatingEventsWithRelTypeReqArgs, RelationEventsResBody,
};
use crate::{AuthArgs, DepotExt, JsonResult, json_ok};
/// #GET /_matrix/client/r0/rooms/{room_id}/relations/{event_id}
#[endpoint]
pub(super) fn get_relation(
    _aa: AuthArgs,
    args: RelatingEventsReqArgs,
    depot: &mut Depot,
) -> JsonResult<RelationEventsResBody> {
    let sender_id = depot.authed_info()?.user_id();
    // No event-type or rel-type constraint: every relation of the event qualifies.
    json_ok(crate::room::pdu_metadata::paginate_relations_with_filter(
        sender_id,
        &args.room_id,
        &args.event_id,
        None,
        None,
        args.from.as_deref(),
        args.to.as_deref(),
        args.limit,
        args.recurse,
        args.dir,
    )?)
}
/// #GET /_matrix/client/r0/rooms/{room_id}/relations/{event_id}/{rel_type}
#[endpoint]
pub(super) async fn get_relation_by_rel_type(
    _aa: AuthArgs,
    args: RelatingEventsWithRelTypeReqArgs,
    depot: &mut Depot,
) -> JsonResult<RelationEventsResBody> {
    let authed = depot.authed_info()?;
    // Constrain by relation type only; any child event type is accepted.
    let rel_type = Some(args.rel_type.clone());
    let res_body = crate::room::pdu_metadata::paginate_relations_with_filter(
        authed.user_id(),
        &args.room_id,
        &args.event_id,
        None,
        rel_type,
        args.from.as_deref(),
        args.to.as_deref(),
        args.limit,
        args.recurse,
        args.dir,
    )?;
    json_ok(res_body)
}
/// #GET /_matrix/client/r0/rooms/{room_id}/relations/{event_id}/{rel_type}/{event_type}
#[endpoint]
pub(super) async fn get_relation_by_rel_type_and_event_type(
    _aa: AuthArgs,
    args: RelatingEventsWithRelTypeAndEventTypeReqArgs,
    depot: &mut Depot,
) -> JsonResult<RelationEventsResBody> {
    let sender_id = depot.authed_info()?.user_id();
    // Constrain by both relation type and child event type.
    let event_type = Some(args.event_type.clone());
    let rel_type = Some(args.rel_type.clone());
    json_ok(crate::room::pdu_metadata::paginate_relations_with_filter(
        sender_id,
        &args.room_id,
        &args.event_id,
        event_type,
        rel_type,
        args.from.as_deref(),
        args.to.as_deref(),
        args.limit,
        args.recurse,
        args.dir,
    )?)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/state.rs | crates/server/src/routing/client/room/state.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::client::room::ReportContentReqBody;
use crate::core::client::state::{
SendStateEventReqBody, SendStateEventResBody, StateEventFormat, StateEventsForEmptyKeyReqArgs,
StateEventsForKeyReqArgs, StateEventsForKeyResBody, StateEventsResBody,
};
use crate::core::client::typing::{CreateTypingEventReqBody, Typing};
use crate::core::events::room::message::RoomMessageEventContent;
use crate::core::identifiers::*;
use crate::core::room::{RoomEventReqArgs, RoomEventTypeReqArgs, RoomTypingReqArgs};
use crate::room::{state, timeline};
use crate::utils::HtmlEscape;
use crate::{AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, empty_ok, json_ok, room};
/// #GET /_matrix/client/r0/rooms/{room_id}/state
/// Get all state events for a room.
///
/// - If not joined: Only works if current room history visibility is world readable
#[endpoint]
pub(super) fn get_state(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    depot: &mut Depot,
) -> JsonResult<StateEventsResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let room_id = room_id.into_inner();
    // A caller who cannot see current events is only admitted if they were once
    // a member (i.e. a leave sequence number exists for them).
    // NOTE(review): unlike `state_for_key`, the leave cutoff is not applied to
    // the state lookup below — confirm this is intended.
    if !state::user_can_see_events(sender_id, &room_id)?
        && room::user::leave_sn(sender_id, &room_id).is_err()
    {
        return Err(MatrixError::forbidden(
            "you don't have permission to view this room",
            None,
        )
        .into());
    }
    let frame_id = room::get_frame_id(&room_id, None).unwrap_or_default();
    let room_state = state::get_full_state(frame_id)
        .unwrap_or_default()
        .values()
        .map(|pdu| pdu.to_state_event())
        .collect();
    json_ok(StateEventsResBody::new(room_state))
}
/// #POST /_matrix/client/r0/rooms/{room_id}/report/{event_id}
/// Reports an inappropriate event to homeserver admins
#[endpoint]
pub async fn report(
    _aa: AuthArgs,
    args: RoomEventReqArgs,
    body: JsonBody<ReportContentReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Resolve the reported event so the admin notification can include its details.
    let pdu = timeline::get_pdu(&args.event_id)
        .map_err(|_| MatrixError::invalid_param("Invalid Event ID"))?;
    // Spec: report scores range from 0 (least offensive) to -100 (most offensive).
    if body.score.is_some_and(|s| !(-100..=0).contains(&s)) {
        return Err(MatrixError::invalid_param("Invalid score, must be within 0 to -100").into());
    }
    if body.reason.as_deref().is_some_and(|s| s.chars().count() > 250) {
        return Err(MatrixError::invalid_param(
            "Reason too long, should be 250 characters or fewer",
        )
        .into());
    }
    // Best-effort: failing to deliver the admin-room message must not fail the
    // report request itself.
    let _ = crate::admin::send_message(RoomMessageEventContent::text_html(
        format!(
            "Report received from: {}\n\n\
            Event ID: {:?}\n\
            Room ID: {:?}\n\
            Sent By: {:?}\n\n\
            Report Score: {:?}\n\
            Report Reason: {:?}",
            authed.user_id(),
            pdu.event_id,
            pdu.room_id,
            pdu.sender,
            body.score,
            body.reason
        ),
        format!(
            "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
            </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
            <a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
            </li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
            Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
            </ul></details>",
            authed.user_id(),
            pdu.event_id,
            pdu.room_id,
            pdu.sender,
            body.score,
            HtmlEscape(body.reason.as_deref().unwrap_or(""))
        ),
    ))
    .await;
    empty_ok()
}
/// #GET /_matrix/client/r0/rooms/{room_id}/state/{event_type}/{state_key}
/// Get single state event of a room.
///
/// - If not joined: Only works if current room history visibility is world readable
#[endpoint]
pub(super) fn state_for_key(
    _aa: AuthArgs,
    args: StateEventsForKeyReqArgs,
    depot: &mut Depot,
) -> JsonResult<StateEventsForKeyResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    // Visible history: no cutoff. Otherwise a former member may read state up
    // to the point they left; anyone else is rejected.
    let until_sn = if state::user_can_see_events(sender_id, &args.room_id)? {
        None
    } else {
        match room::user::leave_sn(sender_id, &args.room_id) {
            Ok(leave_sn) => Some(leave_sn),
            Err(_) => {
                return Err(MatrixError::forbidden(
                    "you don't have permission to view this room",
                    None,
                )
                .into());
            }
        }
    };
    let event = room::get_state(&args.room_id, &args.event_type, &args.state_key, until_sn)?;
    // Only include the full event when the caller asked for the "event" format.
    let wants_event = args
        .format
        .as_ref()
        .is_some_and(|f| *f == StateEventFormat::Event);
    json_ok(StateEventsForKeyResBody {
        content: Some(event.get_content()?),
        event: wants_event.then(|| event.to_state_event_value()),
    })
}
/// #GET /_matrix/client/r0/rooms/{room_id}/state/{event_type}
/// Get single state event of a room.
///
/// - If not joined: Only works if current room history visibility is world readable
#[endpoint]
pub(super) async fn state_for_empty_key(
    _aa: AuthArgs,
    args: StateEventsForEmptyKeyReqArgs,
    depot: &mut Depot,
) -> JsonResult<StateEventsForKeyResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    // Visible history: no cutoff. Otherwise a former member may read state up
    // to the point they left; anyone else is rejected.
    let until_sn = if state::user_can_see_events(sender_id, &args.room_id)? {
        None
    } else {
        match room::user::leave_sn(sender_id, &args.room_id) {
            Ok(leave_sn) => Some(leave_sn),
            Err(_) => {
                return Err(MatrixError::forbidden(
                    "you don't have permission to view this room",
                    None,
                )
                .into());
            }
        }
    };
    // The state key is always empty for this route.
    let event = room::get_state(&args.room_id, &args.event_type, "", until_sn)?;
    // Only include the full event when the caller asked for the "event" format.
    let wants_event = args
        .format
        .as_ref()
        .is_some_and(|f| f.to_lowercase().eq("event"));
    json_ok(StateEventsForKeyResBody {
        content: Some(event.get_content()?),
        event: wants_event.then(|| event.to_state_event_value()),
    })
}
/// #PUT /_matrix/client/r0/rooms/{room_id}/state/{event_type}/{state_key}
/// Sends a state event into the room.
///
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
/// - If event is new canonical_alias: Rejects if alias is incorrect
#[endpoint]
pub(super) async fn send_state_for_key(
    _aa: AuthArgs,
    args: StateEventsForKeyReqArgs,
    body: JsonBody<SendStateEventReqBody>,
    depot: &mut Depot,
) -> JsonResult<SendStateEventResBody> {
    let authed = depot.authed_info()?;
    let room_version = crate::room::get_version(&args.room_id)?;
    // Auth rules are enforced inside `send_state_event_for_key`.
    let event_id = crate::state::send_state_event_for_key(
        authed.user_id(),
        &args.room_id,
        &room_version,
        &args.event_type,
        body.into_inner().0,
        args.state_key.to_owned(),
    )
    .await?;
    json_ok(SendStateEventResBody {
        event_id: (*event_id).to_owned(),
    })
}
/// #PUT /_matrix/client/r0/rooms/{room_id}/state/{event_type}
/// Sends a state event into the room.
///
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
/// - If event is new canonical_alias: Rejects if alias is incorrect
#[endpoint]
pub(super) async fn send_state_for_empty_key(
    _aa: AuthArgs,
    args: RoomEventTypeReqArgs,
    body: JsonBody<SendStateEventReqBody>,
    depot: &mut Depot,
) -> JsonResult<SendStateEventResBody> {
    let authed = depot.authed_info()?;
    let room_version = crate::room::get_version(&args.room_id)?;
    // The state key is always empty for this route; auth rules are enforced
    // inside `send_state_event_for_key`.
    let event_id = crate::state::send_state_event_for_key(
        authed.user_id(),
        &args.room_id,
        &room_version,
        &args.event_type.to_string().into(),
        body.into_inner().0,
        "".into(),
    )
    .await?;
    json_ok(SendStateEventResBody {
        event_id: (*event_id).to_owned(),
    })
}
/// #PUT /_matrix/client/r0/rooms/{room_id}/typing/{user_id}
/// Sets the typing state of the sender user.
#[endpoint]
pub async fn send_typing(
    _aa: AuthArgs,
    args: RoomTypingReqArgs,
    body: JsonBody<CreateTypingEventReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    if !room::user::is_joined(authed.user_id(), &args.room_id)? {
        return Err(MatrixError::forbidden("You are not in this room.", None).into());
    }
    if let Typing::Yes(duration) = body.state {
        // `duration` is client-supplied: clamp the u128->u64 conversion and the
        // addition so an absurd value cannot truncate or overflow the expiry.
        let timeout = u64::try_from(duration.as_millis()).unwrap_or(u64::MAX);
        let expires_at = UnixMillis::now().get().saturating_add(timeout);
        room::typing::add_typing(authed.user_id(), &args.room_id, expires_at, true).await?;
    } else {
        room::typing::remove_typing(authed.user_id(), &args.room_id, true).await?;
    }
    empty_ok()
}
#[endpoint]
pub(super) async fn timestamp(_aa: AuthArgs, depot: &mut Depot) -> EmptyResult {
    // TODO: not implemented yet; currently only enforces authentication.
    depot.authed_info()?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/receipt.rs | crates/server/src/routing/client/room/receipt.rs | use std::collections::BTreeMap;
use salvo::oapi::extract::JsonBody;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::events::RoomAccountDataEventType;
use crate::core::events::fully_read::{FullyReadEvent, FullyReadEventContent};
use crate::core::events::receipt::{
CreateReceiptReqBody, Receipt, ReceiptEvent, ReceiptEventContent, ReceiptThread, ReceiptType,
SendReceiptReqArgs,
};
use crate::core::presence::PresenceState;
use crate::room::push_action;
use crate::{AppError, AuthArgs, DepotExt, EmptyResult, empty_ok, room};
/// #POST /_matrix/client/r0/rooms/{room_id}/receipt/{receipt_type}/{event_id}
/// Sets private read marker and public read receipt EDU.
///
/// `m.fully_read` is stored as room account data; `m.read` is recorded and
/// federated; `m.read.private` stays local. Read receipts also clear pending
/// push actions up to the acknowledged event.
#[endpoint]
pub(super) fn send_receipt(
    _aa: AuthArgs,
    args: SendReceiptReqArgs,
    body: JsonBody<CreateReceiptReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let body = body.into_inner();
    // Threaded receipts carry the thread root id; unthreaded/main-thread don't.
    let thread_id = match &body.thread {
        ReceiptThread::Thread(id) => Some(&**id),
        _ => None,
    };
    // Sending a receipt implies user activity.
    crate::user::ping_presence(sender_id, &PresenceState::Online)?;
    let event_sn = crate::event::get_event_sn(&args.event_id)?;
    match args.receipt_type {
        ReceiptType::FullyRead => {
            // Fully-read markers live in room account data, not in receipts.
            let fully_read_event = FullyReadEvent {
                content: FullyReadEventContent {
                    event_id: args.event_id.clone(),
                },
            };
            crate::user::set_data(
                sender_id,
                Some(args.room_id.clone()),
                &RoomAccountDataEventType::FullyRead.to_string(),
                serde_json::to_value(fully_read_event.content).expect("to json value always works"),
            )?;
            push_action::remove_actions_for_room(sender_id, &args.room_id)?;
        }
        ReceiptType::Read => {
            // Build the nested event_id -> receipt_type -> user -> Receipt map
            // expected by the receipt EDU format.
            let mut user_receipts = BTreeMap::new();
            user_receipts.insert(
                sender_id.to_owned(),
                Receipt {
                    ts: Some(UnixMillis::now()),
                    thread: body.thread.clone(),
                },
            );
            let mut receipts = BTreeMap::new();
            receipts.insert(ReceiptType::Read, user_receipts);
            let mut receipt_content = BTreeMap::new();
            receipt_content.insert(args.event_id.clone(), receipts);
            room::receipt::update_read(
                sender_id,
                &args.room_id,
                &ReceiptEvent {
                    content: ReceiptEventContent(receipt_content),
                    room_id: args.room_id.clone(),
                },
                true,
            )?;
            push_action::remove_actions_until(sender_id, &args.room_id, event_sn, thread_id)?;
        }
        ReceiptType::ReadPrivate => {
            // let count = timeline::get_event_sn(&args.event_id)?
            //     .ok_or(MatrixError::invalid_param("Event does not exist."))?;
            // Private read markers are stored locally only — never federated.
            crate::data::room::receipt::set_private_read(
                &args.room_id,
                sender_id,
                &args.event_id,
                event_sn,
            )?;
            push_action::remove_actions_until(sender_id, &args.room_id, event_sn, thread_id)?;
        }
        _ => return Err(AppError::internal("unsupported receipt type")),
    }
    // Reading up to an event changes the unread/notification counts.
    if matches!(
        &args.receipt_type,
        ReceiptType::Read | ReceiptType::ReadPrivate
    ) {
        push_action::refresh_notify_summary(sender_id, &args.room_id)?;
    }
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/summary.rs | crates/server/src/routing/client/room/summary.rs | use futures_util::stream::{FuturesUnordered, StreamExt};
use salvo::prelude::*;
use crate::core::client::room::{SummaryMsc3266ReqArgs, SummaryMsc3266ResBody};
use crate::core::events::room::member::MembershipState;
use crate::core::federation::space::{
HierarchyReqArgs, HierarchyResBody, SpaceHierarchyParentSummary, hierarchy_request,
};
use crate::core::identifiers::*;
use crate::core::space::SpaceRoomJoinRule;
use crate::room::state;
use crate::routing::prelude::*;
use crate::{GetUrlOrigin, config, data, room, sending};
/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`
///
/// Returns a short description of the state of a room.
///
/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266)
#[handler]
pub async fn get_summary_msc_3266(
    _aa: AuthArgs,
    args: SummaryMsc3266ReqArgs,
    depot: &mut Depot,
) -> JsonResult<SummaryMsc3266ResBody> {
    // Authentication is optional: public/world-readable rooms can be summarized
    // by anonymous callers.
    let authed = depot.authed_info().ok();
    let sender_id = authed.map(|a| a.user_id());
    // Resolve an alias to its room id, collecting candidate servers for federation.
    let (room_id, servers) =
        room::alias::resolve_with_servers(&args.room_id_or_alias, Some(args.via.clone())).await?;
    if data::room::is_disabled(&room_id)? {
        return Err(MatrixError::forbidden("This room is banned on this homeserver.", None).into());
    }
    if room::is_server_joined(&config::get().server_name, &room_id)? {
        let res_body = local_room_summary(&room_id, sender_id).await?;
        json_ok(res_body)
    } else {
        // We don't participate in the room: fetch the summary over federation.
        let room = remote_room_summary_hierarchy(&room_id, &servers, sender_id).await?;
        json_ok(SummaryMsc3266ResBody {
            room_id: room_id.to_owned(),
            canonical_alias: room.canonical_alias,
            avatar_url: room.avatar_url,
            guest_can_join: room.guest_can_join,
            name: room.name,
            num_joined_members: room.num_joined_members,
            topic: room.topic,
            world_readable: room.world_readable,
            join_rule: room.join_rule,
            room_type: room.room_type,
            room_version: room.room_version,
            encryption: room.encryption,
            // An authenticated caller reaching this branch is not joined on this
            // server, so membership is reported as Leave.
            membership: sender_id.is_some().then_some(MembershipState::Leave),
        })
    }
}
/// Builds the MSC3266 summary for a room this server participates in.
async fn local_room_summary(
    room_id: &RoomId,
    sender_id: Option<&UserId>,
) -> AppResult<SummaryMsc3266ResBody> {
    trace!(
        ?sender_id,
        "Sending local room summary response for {room_id:?}"
    );
    let join_rule = room::get_join_rule(room_id)?;
    let world_readable = room::is_world_readable(room_id);
    let guest_can_join = room::guest_can_join(room_id);
    trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}");
    // Reject before leaking any room metadata to a caller who may not see it.
    require_user_can_see_summary(
        room_id,
        &join_rule.clone().into(),
        guest_can_join,
        world_readable,
        join_rule.allowed_rooms(),
        sender_id,
    )
    .await?;
    // Membership falls back to Leave when the sender has no member event.
    let membership = sender_id.map(|sender_id| {
        room::get_member(room_id, sender_id, None)
            .map_or(MembershipState::Leave, |content| content.membership)
    });
    Ok(SummaryMsc3266ResBody {
        room_id: room_id.to_owned(),
        canonical_alias: room::get_canonical_alias(room_id).ok().flatten(),
        avatar_url: room::get_avatar_url(room_id).ok().flatten(),
        guest_can_join,
        name: room::get_name(room_id).ok(),
        num_joined_members: room::joined_member_count(room_id).unwrap_or(0),
        topic: room::get_topic(room_id).ok(),
        world_readable,
        room_type: room::get_room_type(room_id).ok().flatten(),
        room_version: room::get_version(room_id).ok(),
        encryption: room::get_encryption(room_id).ok(),
        membership,
        allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(),
        join_rule: join_rule.into(),
    })
}
/// Used by MSC3266 to fetch a room's info if we do not know about it
///
/// Queries each candidate server and returns the first hierarchy response that
/// is well-formed, matches the requested room, and is visible to the caller.
async fn remote_room_summary_hierarchy(
    room_id: &RoomId,
    servers: &[OwnedServerName],
    sender_id: Option<&UserId>,
) -> AppResult<SpaceHierarchyParentSummary> {
    trace!(
        ?sender_id,
        ?servers,
        "Sending remote room summary response for {room_id:?}"
    );
    let conf = crate::config::get();
    if conf.enabled_federation().is_none() {
        return Err(MatrixError::forbidden("Federation is disabled.", None).into());
    }
    if room::is_disabled(room_id)? {
        // Previously this message contained a literal, never-interpolated
        // `{room_id}` placeholder (and a "Federaton" typo); format it properly.
        let msg = format!("Federation of room {room_id} is currently disabled on this server.");
        return Err(MatrixError::forbidden(msg.as_str(), None).into());
    }
    let mut requests: FuturesUnordered<_> = FuturesUnordered::new();
    for server in servers {
        let Ok(request) = hierarchy_request(
            &server.origin().await,
            HierarchyReqArgs {
                room_id: room_id.to_owned(),
                suggested_only: false,
            },
        ) else {
            continue;
        };
        requests.push(sending::send_federation_request(
            server,
            request.into_inner(),
            None,
        ));
    }
    // Take the first successful, well-formed, authorized response.
    while let Some(Ok(response)) = requests.next().await {
        trace!("{response:?}");
        let Ok(res_body) = response.json::<HierarchyResBody>().await else {
            continue;
        };
        if res_body.room.room_id != room_id {
            tracing::warn!(
                "Room ID {} returned does not belong to the requested room ID {}",
                res_body.room.room_id,
                room_id
            );
            continue;
        }
        return require_user_can_see_summary(
            room_id,
            &res_body.room.join_rule,
            res_body.room.guest_can_join,
            res_body.room.world_readable,
            res_body.room.allowed_room_ids.iter().map(|r| &**r),
            sender_id,
        )
        .await
        .map(|()| res_body.room);
    }
    Err(MatrixError::not_found(
        "Room is unknown to this server and was unable to fetch over federation with the \
        provided servers available",
    )
    .into())
}
/// Checks whether `sender_id` (or an unauthenticated caller when `None`) is
/// allowed to see the summary of `room_id`; returns a forbidden error otherwise.
async fn require_user_can_see_summary<'a, I>(
    room_id: &RoomId,
    join_rule: &SpaceRoomJoinRule,
    guest_can_join: bool,
    world_readable: bool,
    mut allowed_room_ids: I,
    sender_id: Option<&UserId>,
) -> AppResult<()>
where
    I: Iterator<Item = &'a RoomId> + Send,
{
    // Publicly joinable/knockable rooms always expose their summary.
    let is_public_room = matches!(
        join_rule,
        SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted
    );
    let Some(sender_id) = sender_id else {
        // Unauthenticated callers only see public or world-readable rooms.
        if is_public_room || world_readable {
            return Ok(());
        }
        return Err(MatrixError::forbidden(
            "Room is not world readable or publicly accessible/joinable, authentication is \
             required",
            None,
        )
        .into());
    };
    // Authenticated: visible history, guest access, a public join rule, or
    // membership in one of the allowed restricted rooms grants access.
    if state::user_can_see_events(sender_id, room_id)?
        || (data::user::is_deactivated(sender_id).unwrap_or(false) && guest_can_join)
        || is_public_room
        || allowed_room_ids.any(|room| room::user::is_joined(sender_id, room).unwrap_or(false))
    {
        return Ok(());
    }
    Err(MatrixError::forbidden(
        "Room is not world readable, not publicly accessible/joinable, restricted room \
         conditions not met, and guest access is forbidden. Not allowed to see details \
         of this room.",
        None,
    )
    .into())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/membership.rs | crates/server/src/routing/client/room/membership.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use palpo_core::Seqnum;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde_json::value::to_raw_value;
use crate::core::client::membership::MembershipEventFilter;
use crate::core::client::membership::{
BanUserReqBody, InvitationRecipient, InviteUserReqBody, JoinRoomReqBody, JoinRoomResBody,
JoinedMembersResBody, JoinedRoomsResBody, KickUserReqBody, LeaveRoomReqBody, MembersReqArgs,
MembersResBody, RoomMember, UnbanUserReqBody,
};
use crate::core::client::room::{KnockReqArgs, KnockReqBody};
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::events::{StateEventType, TimelineEventType};
use crate::core::federation::query::{ProfileReqArgs, profile_request};
use crate::core::identifiers::*;
use crate::core::state::Event;
use crate::core::user::ProfileResBody;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::DbProfile;
use crate::event::{BatchToken, PduBuilder, SnPduEvent};
use crate::exts::*;
use crate::membership::banned_room_check;
use crate::room::{state, timeline};
use crate::sending::send_federation_request;
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, data, empty_ok,
json_ok, room, sending, utils,
};
/// #POST /_matrix/client/r0/rooms/{room_id}/members
/// Lists all joined users in a room.
///
/// - Only works if the user is currently joined
#[endpoint]
pub(super) fn get_members(
    _aa: AuthArgs,
    args: MembersReqArgs,
    depot: &mut Depot,
) -> JsonResult<MembersResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let membership = args.membership.as_ref();
    let not_membership = args.not_membership.as_ref();
    // A caller who cannot see current events may still read membership up to
    // the point they left the room; anyone else is rejected.
    let mut until_sn = if !state::user_can_see_events(sender_id, &args.room_id)? {
        if let Ok(leave_sn) = crate::room::user::leave_sn(sender_id, &args.room_id) {
            Some(leave_sn)
        } else {
            return Err(MatrixError::forbidden(
                "you don't have permission to view this room",
                None,
            )
            .into());
        }
    } else {
        None
    };
    // The optional `at` token pins the member list to a historical state frame
    // and additionally tightens the visibility cutoff.
    let frame_id = if let Some(at_tk) = &args.at {
        if let Ok(at_tk) = at_tk.parse::<BatchToken>() {
            if let Some(usn) = until_sn {
                until_sn = Some(usn.min(at_tk.event_sn()));
            } else {
                until_sn = Some(at_tk.event_sn());
            }
            // Latest state frame at or before the `at` token's event.
            event_points::table
                .filter(event_points::room_id.eq(&args.room_id))
                .filter(event_points::event_sn.le(at_tk.event_sn()))
                .filter(event_points::frame_id.is_not_null())
                .order(event_points::frame_id.desc())
                .select(event_points::frame_id)
                .first::<Option<i64>>(&mut connect()?)?
                .unwrap_or_default()
        } else {
            return Err(MatrixError::bad_state("invalid at parameter").into());
        }
    } else {
        crate::room::get_frame_id(&args.room_id, until_sn).unwrap_or_default()
    };
    // Member events from the chosen frame, run through the request's
    // membership/not_membership filters.
    let states: Vec<_> = state::get_full_state(frame_id)?
        .into_iter()
        .filter(|(key, _)| key.0 == StateEventType::RoomMember)
        .filter_map(|(_, pdu)| membership_filter(pdu, membership, not_membership, until_sn))
        .map(|pdu| pdu.to_member_event())
        .collect();
    json_ok(MembersResBody { chunk: states })
}
/// Applies the `membership`/`not_membership` filters from a `/members` request
/// to a single member event, returning the pdu only when it passes.
///
/// Events newer than `until_sn` (the caller's visibility cutoff) are always
/// dropped, as are events whose content fails to parse.
fn membership_filter(
    pdu: SnPduEvent,
    for_membership: Option<&MembershipEventFilter>,
    not_membership: Option<&MembershipEventFilter>,
    until_sn: Option<Seqnum>,
) -> Option<SnPduEvent> {
    if let Some(until_sn) = until_sn
        && pdu.event_sn > until_sn
    {
        return None;
    }
    let evt_membership = pdu.get_content::<RoomMemberEventContent>().ok()?.membership;
    // When `membership` is present, the event must match it...
    if let Some(filter) = for_membership {
        let wanted = match filter {
            MembershipEventFilter::Ban => MembershipState::Ban,
            MembershipEventFilter::Invite => MembershipState::Invite,
            MembershipEventFilter::Knock => MembershipState::Knock,
            MembershipEventFilter::Leave => MembershipState::Leave,
            _ => MembershipState::Join,
        };
        if evt_membership != wanted {
            return None;
        }
    }
    // ...and when `not_membership` is present, the event must not match it.
    if let Some(filter) = not_membership {
        let unwanted = match filter {
            MembershipEventFilter::Ban => MembershipState::Ban,
            MembershipEventFilter::Invite => MembershipState::Invite,
            MembershipEventFilter::Join => MembershipState::Join,
            MembershipEventFilter::Knock => MembershipState::Knock,
            _ => MembershipState::Leave,
        };
        if evt_membership == unwanted {
            return None;
        }
    }
    Some(pdu)
}
/// #POST /_matrix/client/r0/rooms/{room_id}/joined_members
/// Lists all members of a room.
///
/// - The sender user must be in the room
/// - TODO: An appservice just needs a puppet joined
/// https://spec.matrix.org/latest/client-server-api/#knocking-on-rooms
#[endpoint]
pub(super) fn joined_members(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    depot: &mut Depot,
) -> JsonResult<JoinedMembersResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let room_id = room_id.into_inner();
    // The sender must be able to see the room's events to list its members.
    if !state::user_can_see_events(sender_id, &room_id)? {
        return Err(
            MatrixError::forbidden("you don't have permission to view this room", None).into(),
        );
    }
    // Map every joined user to their stored profile; users without a profile
    // row are omitted from the response.
    let mut joined = BTreeMap::new();
    for user_id in crate::room::joined_users(&room_id, None)? {
        if let Some(DbProfile {
            display_name,
            avatar_url,
            ..
        }) = data::user::get_profile(&user_id, None)?
        {
            joined.insert(user_id, RoomMember::new(display_name, avatar_url));
        }
    }
    json_ok(JoinedMembersResBody { joined })
}
/// #POST /_matrix/client/r0/joined_rooms
/// Lists all rooms the user has joined.
#[endpoint]
pub(crate) async fn joined_rooms(
    _aa: AuthArgs,
    depot: &mut Depot,
) -> JsonResult<JoinedRoomsResBody> {
    // Straight read of the joined-rooms index for the authenticated user.
    let joined_rooms = data::user::joined_rooms(depot.authed_info()?.user_id())?;
    json_ok(JoinedRoomsResBody { joined_rooms })
}
/// #POST /_matrix/client/r0/rooms/{room_id}/forget
/// Forgets about a room.
///
/// - If the sender user currently left the room: Stops sender user from receiving information about the room
///
/// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to
/// be called from every device
#[endpoint]
pub(super) async fn forget_room(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    crate::membership::forget_room(authed.user_id(), &room_id.into_inner())?;
    empty_ok()
}
/// #POST /_matrix/client/r0/rooms/{room_id}/leave
/// Tries to leave the sender user from a room.
///
/// - This should always work if the user is currently joined.
#[endpoint]
pub(super) async fn leave_room(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<LeaveRoomReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let reason = body.reason.clone();
    crate::membership::leave_room(authed.user_id(), &room_id.into_inner(), reason).await?;
    empty_ok()
}
/// #POST /_matrix/client/r0/rooms/{room_id}/join
/// Tries to join the sender user into a room.
///
/// - If the server knows about this room: creates the join event and does auth rules locally
/// - If the server does not know about the room: asks other servers over federation
#[endpoint]
pub(super) async fn join_room_by_id(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<Option<JoinRoomReqBody>>,
    depot: &mut Depot,
) -> JsonResult<JoinRoomResBody> {
    let authed = depot.authed_info()?;
    let room_id = room_id.into_inner();
    let body = body.into_inner();

    // There is no body.server_name for /roomId/join, so candidate servers are
    // derived from the senders of the user's stripped state events, plus the
    // server implied by the room id itself.
    let mut servers = Vec::new();
    servers.extend(
        state::get_user_state(authed.user_id(), &room_id)?
            .unwrap_or_default()
            .iter()
            .filter_map(|event| serde_json::from_str(event.inner().get()).ok())
            .filter_map(|event: serde_json::Value| event.get("sender").cloned())
            .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
            .filter_map(|sender| UserId::parse(sender).ok())
            .map(|user| user.server_name().to_owned()),
    );
    servers.push(room_id.server_name().map_err(AppError::public)?.to_owned());
    // Avoid contacting the same server multiple times; `dedup` requires the
    // list to be sorted first. Mirrors `join_room_by_id_or_alias`.
    servers.sort_unstable();
    servers.dedup();

    crate::membership::join_room(
        &authed.user,
        Some(authed.device_id()),
        &room_id,
        body.as_ref().and_then(|body| body.reason.clone()),
        &servers,
        body.as_ref()
            .and_then(|body| body.third_party_signed.as_ref()),
        authed.appservice.as_ref(),
        body.as_ref()
            .map(|body| body.extra_data.clone())
            .unwrap_or_default(),
    )
    .await?;
    json_ok(JoinRoomResBody { room_id })
}
/// #POST /_matrix/client/r0/rooms/{room_id}/invite
/// Tries to send an invite event into the room.
#[endpoint]
pub(super) async fn invite_user(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<InviteUserReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;

    // Server-wide policy knob: only admins may invite when configured so.
    let conf = config::get();
    if conf.block_non_admin_invites && !authed.user.is_admin {
        return Err(MatrixError::forbidden("you are not allowed to invite users", None).into());
    }

    // Only Matrix user IDs are supported as invite targets here.
    let user_id = match &body.recipient {
        InvitationRecipient::UserId { user_id } => user_id,
        _ => return Err(MatrixError::not_found("user not found").into()),
    };

    let room_id = room_id.into_inner();
    let reason = body.reason.clone();
    crate::membership::invite_user(authed.user_id(), user_id, &room_id, reason, false).await?;
    empty_ok()
}
/// #POST /_matrix/client/r0/join/{room_id_or_alias}
/// Tries to join the sender user into a room.
///
/// - If the server knows about this room: creates the join event and does auth rules locally
/// - If the server does not know about the room: asks other servers over federation
///
/// The path segment may be either a room id or a room alias; aliases are
/// resolved (possibly over federation) before the join is attempted.
#[endpoint]
pub(crate) async fn join_room_by_id_or_alias(
    _aa: AuthArgs,
    room_id_or_alias: PathParam<OwnedRoomOrAliasId>,
    server_name: QueryParam<Vec<OwnedServerName>, false>,
    via: QueryParam<Vec<OwnedServerName>, false>,
    body: JsonBody<Option<JoinRoomReqBody>>,
    req: &mut Request,
    depot: &mut Depot,
) -> JsonResult<JoinRoomResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let room_id_or_alias = room_id_or_alias.into_inner();
    let body = body.into_inner().unwrap_or_default();
    let remote_addr = req.remote_addr();
    // The servers to attempt to join the room through.
    //
    // One of the servers must be participating in the room.
    //
    // When serializing, this field is mapped to both `server_name` and `via` with identical values.
    //
    // When deserializing, the value is read from `via` if it's not missing or empty and `server_name` otherwise.
    let via = via
        .into_inner()
        .unwrap_or_else(|| server_name.into_inner().unwrap_or_default());
    // Resolve both path forms to a concrete room id plus a candidate server list.
    let (room_id, servers) = match OwnedRoomId::try_from(room_id_or_alias) {
        Ok(room_id) => {
            banned_room_check(
                sender_id,
                Some(&room_id),
                room_id.server_name().ok(),
                remote_addr,
            )
            .await?;
            // Start from the caller-supplied `via` list, or our cached room
            // servers when none were given.
            let mut servers = if via.is_empty() {
                crate::room::lookup_servers(&room_id)?
            } else {
                via.clone()
            };
            // Add the homeservers of senders seen in the user's stripped state
            // (e.g. the inviter's server) as extra join candidates.
            let state_servers = state::get_user_state(sender_id, &room_id)?.unwrap_or_default();
            let state_servers = state_servers
                .iter()
                .filter_map(|event| serde_json::from_str(event.inner().get()).ok())
                .filter_map(|event: serde_json::Value| event.get("sender").cloned())
                .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
                .filter_map(|sender| UserId::parse(sender).ok())
                .map(|user| user.server_name().to_owned());
            servers.extend(state_servers);
            // if let Ok(server) = room_id.server_name() {
            //     if sender_id.is_local() {
            //         servers.push(server.to_owned());
            //     }
            // }
            servers.sort_unstable();
            servers.dedup();
            (room_id, servers)
        }
        Err(room_alias) => {
            // Alias resolution may already yield a server list from federation.
            let (room_id, mut servers) =
                crate::room::resolve_alias(&room_alias, Some(via.clone())).await?;
            banned_room_check(
                sender_id,
                Some(&room_id),
                Some(room_alias.server_name()),
                remote_addr,
            )
            .await?;
            let addl_via_servers = if via.is_empty() {
                crate::room::lookup_servers(&room_id)?
            } else {
                via
            };
            let addl_state_servers =
                state::get_user_state(sender_id, &room_id)?.unwrap_or_default();
            let mut addl_servers: Vec<_> = addl_state_servers
                .iter()
                .filter_map(|event| serde_json::from_str(event.inner().get()).ok())
                .filter_map(|event: serde_json::Value| event.get("sender").cloned())
                .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
                .filter_map(|sender| UserId::parse(sender).ok())
                .map(|user| user.server_name().to_owned())
                .chain(addl_via_servers)
                .collect();
            // if let Ok(server) = room_id.server_name() {
            //     if sender_id.is_local() {
            //         servers.push(server.to_owned());
            //     }
            // }
            servers.append(&mut addl_servers);
            servers.sort_unstable();
            servers.dedup();
            (room_id, servers)
        }
    };
    let join_room_body = crate::membership::join_room(
        authed.user(),
        Some(authed.device_id()),
        &room_id,
        body.reason.clone(),
        &servers,
        body.third_party_signed.as_ref(),
        authed.appservice.as_ref(),
        body.extra_data,
    )
    .await?;
    json_ok(JoinRoomResBody {
        room_id: join_room_body.room_id,
    })
}
/// #POST /_matrix/client/r0/rooms/{room_id}/ban
/// Tries to send a ban event into the room.
///
/// The ban member event is built from, in order of preference: the target's
/// existing member event in the room, a federation profile fetch (remote
/// users), or the locally stored profile (local users).
#[endpoint]
pub(super) async fn ban_user(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<BanUserReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let room_id = room_id.into_inner();
    let state_lock = room::lock_state(&room_id).await;
    // The target's current member event in this room, if any.
    let room_state = room::get_state(
        &room_id,
        &StateEventType::RoomMember,
        body.user_id.as_ref(),
        None,
    )
    .ok();
    let event = if let Some(room_state) = room_state {
        let event = room_state
            .get_content::<RoomMemberEventContent>()
            .map_err(|_| AppError::internal("invalid member event in database."))?;
        // If they are already banned and the reason is unchanged, there isn't any point in sending a new event.
        if event.membership == MembershipState::Ban && event.reason == body.reason {
            return empty_ok();
        }
        // Keep the rest of the existing content (display name, avatar, ...).
        RoomMemberEventContent {
            membership: MembershipState::Ban,
            ..event
        }
    } else if body.user_id.is_remote() {
        // No member event yet: fetch the remote user's profile over federation
        // so the ban event carries their display name / avatar.
        let profile_request = profile_request(
            &body.user_id.server_name().origin().await,
            ProfileReqArgs {
                user_id: body.user_id.to_owned(),
                field: None,
            },
        )?
        .into_inner();
        // A failed profile fetch falls back to an empty profile rather than
        // failing the ban.
        let ProfileResBody {
            avatar_url,
            display_name,
            blurhash,
        } = send_federation_request(body.user_id.server_name(), profile_request, None)
            .await?
            .json()
            .await
            .unwrap_or_default();
        RoomMemberEventContent {
            membership: MembershipState::Ban,
            display_name,
            avatar_url,
            is_direct: None,
            third_party_invite: None,
            blurhash,
            reason: body.reason.clone(),
            join_authorized_via_users_server: None,
            extra_data: Default::default(),
        }
    } else {
        // Local user without a member event in this room: use the stored profile.
        let DbProfile {
            display_name,
            avatar_url,
            blurhash,
            ..
        } = data::user::get_profile(&body.user_id, None)?
            .ok_or(MatrixError::not_found("User profile not found."))?;
        RoomMemberEventContent {
            membership: MembershipState::Ban,
            display_name,
            avatar_url,
            is_direct: None,
            third_party_invite: None,
            blurhash,
            reason: body.reason.clone(),
            join_authorized_via_users_server: None,
            extra_data: Default::default(),
        }
    };
    let pdu = timeline::build_and_append_pdu(
        PduBuilder {
            event_type: TimelineEventType::RoomMember,
            content: to_raw_value(&event).expect("event is valid, we just created it"),
            state_key: Some(body.user_id.to_string()),
            ..Default::default()
        },
        authed.user_id(),
        &room_id,
        &crate::room::get_version(&room_id)?,
        &state_lock,
    )
    .await?;
    // Best-effort: notify the banned user's homeserver directly, even if it no
    // longer shares the room with us. Failure is only logged.
    if let Err(e) = sending::send_pdu_room(
        &room_id,
        &pdu.event_id,
        &[body.user_id.server_name().to_owned()],
        &[],
    ) {
        error!("failed to notify banned user server: {e}");
    }
    empty_ok()
}
/// #POST /_matrix/client/r0/rooms/{room_id}/unban
/// Tries to send an unban event into the room.
#[endpoint]
pub(super) async fn unban_user(
_aa: AuthArgs,
room_id: PathParam<OwnedRoomId>,
body: JsonBody<UnbanUserReqBody>,
depot: &mut Depot,
) -> EmptyResult {
let authed = depot.authed_info()?;
let room_id = room_id.into_inner();
let state_lock = room::lock_state(&room_id).await;
let mut event = room::get_state_content::<RoomMemberEventContent>(
&room_id,
&StateEventType::RoomMember,
body.user_id.as_ref(),
None,
)?;
if event.membership != MembershipState::Ban {
return Err(MatrixError::bad_state(format!(
"Cannot unban user who was not banned, current memebership is {}",
event.membership
))
.into());
}
event.membership = MembershipState::Leave;
event.reason = body.reason.clone();
let pdu = timeline::build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&event).expect("event is valid, we just created it"),
state_key: Some(body.user_id.to_string()),
..Default::default()
},
authed.user_id(),
&room_id,
&crate::room::get_version(&room_id)?,
&state_lock,
)
.await?;
if let Err(e) = sending::send_pdu_room(
&room_id,
&pdu.event_id,
&[body.user_id.server_name().to_owned()],
&[],
) {
error!("failed to notify banned user server: {e}");
}
empty_ok()
}
/// #POST /_matrix/client/r0/rooms/{room_id}/kick
/// Tries to send a kick event into the room.
#[endpoint]
pub(super) async fn kick_user(
_aa: AuthArgs,
room_id: PathParam<OwnedRoomId>,
body: JsonBody<KickUserReqBody>,
depot: &mut Depot,
) -> EmptyResult {
let authed = depot.authed_info()?;
let room_id = room_id.into_inner();
let state_lock = room::lock_state(&room_id).await;
let Ok(event) = room::get_state(
&room_id,
&StateEventType::RoomMember,
body.user_id.as_str(),
None,
) else {
return Err(MatrixError::forbidden(
"users cannot kick users from a room they are not in",
None,
)
.into());
};
let event_content: RoomMemberEventContent = event.get_content()?;
if !matches!(
event_content.membership,
MembershipState::Invite | MembershipState::Knock | MembershipState::Join,
) {
return Err(MatrixError::forbidden(
format!(
"cannot kick a user who is not apart of the room (current membership: {})",
event_content.membership
),
None,
)
.into());
}
if event_content.membership == MembershipState::Invite && event.sender() != authed.user_id() {
return empty_ok();
}
let pdu = timeline::build_and_append_pdu(
PduBuilder::state(
body.user_id.to_string(),
&RoomMemberEventContent {
membership: MembershipState::Leave,
reason: body.reason.clone(),
is_direct: None,
join_authorized_via_users_server: None,
third_party_invite: None,
..event_content
},
),
authed.user_id(),
&room_id,
&crate::room::get_version(&room_id)?,
&state_lock,
)
.await?;
if let Err(e) = sending::send_pdu_room(
&room_id,
&pdu.event_id,
&[body.user_id.server_name().to_owned()],
&[],
) {
error!("failed to notify banned user server: {e}");
}
empty_ok()
}
/// #POST /_matrix/client/v3/knock/{room_id_or_alias}
///
/// Knocks on a room, resolving an alias to a room id first when needed.
/// Candidate servers for the federation knock are gathered from the request
/// body (`via`), cached room servers, and senders of the user's stripped state.
#[endpoint]
pub(crate) async fn knock_room(
    _aa: AuthArgs,
    args: KnockReqArgs,
    body: JsonBody<KnockReqBody>,
    req: &mut Request,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let (room_id, servers) = match OwnedRoomId::try_from(args.room_id_or_alias) {
        Ok(room_id) => {
            crate::membership::banned_room_check(
                sender_id,
                Some(&room_id),
                room_id.server_name().ok(),
                req.remote_addr(),
            )
            .await?;
            let mut servers = body.via.clone();
            servers.extend(crate::room::lookup_servers(&room_id).unwrap_or_default());
            servers.extend(
                state::get_user_state(sender_id, &room_id)
                    .unwrap_or_default()
                    .unwrap_or_default()
                    .iter()
                    .filter_map(|event| event.get_field("sender").ok().flatten())
                    .filter_map(|sender: &str| UserId::parse(sender).ok())
                    .map(|user| user.server_name().to_owned()),
            );
            if let Ok(server) = room_id.server_name() {
                servers.push(server.to_owned());
            }
            // `Vec::dedup` only removes *consecutive* duplicates, so the list
            // must be sorted first (as the alias branch below already does) to
            // drop every duplicate server.
            servers.sort_unstable();
            servers.dedup();
            utils::shuffle(&mut servers);
            (room_id, servers)
        }
        Err(room_alias) => {
            // Alias resolution may already yield a server list from federation.
            let (room_id, mut servers) =
                crate::room::resolve_alias(&room_alias, Some(body.via.clone())).await?;
            banned_room_check(
                sender_id,
                Some(&room_id),
                Some(room_alias.server_name()),
                req.remote_addr(),
            )
            .await?;
            let addl_via_servers = crate::room::lookup_servers(&room_id)?;
            let addl_state_servers =
                state::get_user_state(sender_id, &room_id)?.unwrap_or_default();
            let mut addl_servers: Vec<_> = addl_state_servers
                .iter()
                .filter_map(|event| serde_json::from_str(event.inner().get()).ok())
                .filter_map(|event: serde_json::Value| event.get("sender").cloned())
                .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
                .filter_map(|sender| UserId::parse(sender).ok())
                .map(|user| user.server_name().to_owned())
                .chain(addl_via_servers)
                .collect();
            addl_servers.sort_unstable();
            addl_servers.dedup();
            utils::shuffle(&mut addl_servers);
            servers.append(&mut addl_servers);
            (room_id, servers)
        }
    };
    crate::membership::knock_room(sender_id, &room_id, body.reason.clone(), &servers).await?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/space.rs | crates/server/src/routing/client/room/space.rs | use salvo::prelude::*;
use crate::core::client::space::{HierarchyReqArgs, HierarchyResBody};
use crate::{AuthArgs, DepotExt, JsonResult, json_ok, room};
/// `#GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
#[endpoint]
pub(super) async fn get_hierarchy(
    _aa: AuthArgs,
    args: HierarchyReqArgs,
    depot: &mut Depot,
) -> JsonResult<HierarchyResBody> {
    let authed = depot.authed_info()?;
    // All pagination and visibility handling lives in the space module.
    json_ok(room::space::get_room_hierarchy(authed.user_id(), &args).await?)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/tag.rs | crates/server/src/routing/client/room/tag.rs |
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::tag::{OperateTagReqArgs, TagsResBody, UpsertTagReqBody};
use crate::core::events::RoomAccountDataEventType;
use crate::core::events::tag::TagEventContent;
use crate::core::user::UserRoomReqArgs;
use crate::{AuthArgs, DepotExt, EmptyResult, JsonResult, data, empty_ok, json_ok};
/// #GET /_matrix/client/r0/user/{user_id}/rooms/{room_idd}/tags
/// Returns tags on the room.
///
/// - Gets the tag event of the room account data.
#[endpoint]
pub(super) async fn list_tags(
    _aa: AuthArgs,
    args: UserRoomReqArgs,
    depot: &mut Depot,
) -> JsonResult<TagsResBody> {
    let authed = depot.authed_info()?;
    // Missing account data is treated as an empty tag set.
    let content = data::user::get_data::<TagEventContent>(
        authed.user_id(),
        Some(&args.room_id),
        &RoomAccountDataEventType::Tag.to_string(),
    )
    .unwrap_or_default();
    json_ok(TagsResBody {
        tags: content.tags,
    })
}
/// #PUT /_matrix/client/r0/user/{user_id}/rooms/{room_id}/tags/{tag}
/// Adds a tag to the room.
///
/// - Inserts the tag into the tag event of the room account data.
#[endpoint]
pub(super) async fn upsert_tag(
    _aa: AuthArgs,
    args: OperateTagReqArgs,
    body: JsonBody<UpsertTagReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let event_type = RoomAccountDataEventType::Tag.to_string();

    // Read-modify-write the user's tag content for this room; a missing
    // event starts from an empty tag set.
    let mut content = data::user::get_data::<TagEventContent>(
        authed.user_id(),
        Some(&args.room_id),
        &event_type,
    )
    .unwrap_or_default();
    content
        .tags
        .insert(args.tag.clone().into(), body.tag_info.clone());

    crate::data::user::set_data(
        authed.user_id(),
        Some(args.room_id.clone()),
        &event_type,
        serde_json::to_value(content).expect("to json value always works"),
    )?;
    empty_ok()
}
/// #DELETE /_matrix/client/r0/user/{user_id}/rooms/{room_id}/tags/{tag}
/// Deletes a tag from the room.
///
/// - Removes the tag from the tag event of the room account data.
#[endpoint]
pub(super) async fn delete_tag(
    _aa: AuthArgs,
    args: OperateTagReqArgs,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let event_type = RoomAccountDataEventType::Tag.to_string();

    // Read-modify-write: removing a tag that isn't present is a no-op but
    // still rewrites the account data event.
    let mut content = crate::data::user::get_data::<TagEventContent>(
        authed.user_id(),
        Some(&args.room_id),
        &event_type,
    )
    .unwrap_or_default();
    content.tags.remove(&args.tag.clone().into());

    crate::data::user::set_data(
        authed.user_id(),
        Some(args.room_id.clone()),
        &event_type,
        serde_json::to_value(content).expect("to json value always works"),
    )?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/message.rs | crates/server/src/routing/client/room/message.rs | use std::collections::{BTreeMap, HashSet};
use diesel::prelude::*;
use serde_json::value::to_raw_value;
use crate::core::Direction;
use crate::core::client::message::{
CreateMessageReqArgs, CreateMessageWithTxnReqArgs, MessagesReqArgs, MessagesResBody,
SendMessageResBody,
};
use crate::core::events::{StateEventType, TimelineEventType};
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::event::BatchToken;
use crate::room::timeline::{self, topolo};
use crate::routing::prelude::*;
use crate::{PduBuilder, room};
/// #GET /_matrix/client/r0/rooms/{room_id}/messages
/// Allows paginating through room history.
///
/// The caller must be a current member, or a former member who left without
/// forgetting the room. Backward pagination may trigger federation backfill.
#[endpoint]
pub(super) async fn get_messages(
    _aa: AuthArgs,
    args: MessagesReqArgs,
    depot: &mut Depot,
) -> JsonResult<MessagesResBody> {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    // Membership gate: the sender must be joined, or have a non-forgotten
    // leave membership row for this room.
    let is_joined = diesel_exists!(
        room_users::table
            .filter(room_users::room_id.eq(&args.room_id))
            .filter(room_users::user_id.eq(sender_id))
            .filter(room_users::membership.eq("join")),
        &mut connect()?
    )?;
    if !is_joined {
        let Some((_event_sn, forgotten)) = room_users::table
            .filter(room_users::room_id.eq(&args.room_id))
            .filter(room_users::user_id.eq(sender_id))
            .filter(room_users::membership.eq("leave"))
            .select((room_users::event_sn, room_users::forgotten))
            .first::<(i64, bool)>(&mut connect()?)
            .optional()?
        else {
            return Err(MatrixError::forbidden("you aren't a member of the room", None).into());
        };
        if forgotten {
            return Err(MatrixError::forbidden("you aren't a member of the room", None).into());
        }
    }
    // NOTE(review): the disabled block below would cap a leaver's view at
    // their leave event; currently leavers can paginate like members — confirm
    // whether visibility is enforced further down the stack.
    // let until_tk = if !is_joined {
    //     let Some((event_sn, forgotten)) = room_users::table
    //         .filter(room_users::room_id.eq(&args.room_id))
    //         .filter(room_users::user_id.eq(sender_id))
    //         .filter(room_users::membership.eq("leave"))
    //         .select((room_users::event_sn, room_users::forgotten))
    //         .first::<(i64, bool)>(&mut connect()?)
    //         .optional()?
    //     else {
    //         return Err(MatrixError::forbidden("you aren't a member of the room", None).into());
    //     };
    //     if forgotten {
    //         return Err(MatrixError::forbidden("you aren't a member of the room", None).into());
    //     }
    //     get_batch_token_by_sn(event_sn).ok()
    // } else {
    //     args.to.as_ref().map(|to| to.parse()).transpose()?
    // };
    let until_tk = args.to.as_ref().map(|to| to.parse()).transpose()?;
    // Default start position depends on direction: forward pagination starts
    // at the earliest live token, backward at the latest.
    let from_tk: BatchToken = args
        .from
        .as_ref()
        .map(|from| from.parse())
        .transpose()?
        .unwrap_or(match args.dir {
            Direction::Forward => BatchToken::LIVE_MIN,
            Direction::Backward => BatchToken::LIVE_MAX,
        });
    // if from_tk.event_depth.is_none() {
    //     from_tk = events::table
    //         .filter(events::sn.le(from_tk.event_sn()))
    //         .order_by(events::sn.desc())
    //         .select((events::sn, events::depth))
    //         .first::<(Seqnum, i64)>(&mut connect()?)
    //         .map(|(sn, depth)| BatchToken::new(sn, Some(depth)))?
    // }
    crate::room::lazy_loading::lazy_load_confirm_delivery(
        authed.user_id(),
        authed.device_id(),
        &args.room_id,
        from_tk.event_sn(),
    )?;
    // Cap the page size at 100 events.
    let limit = args.limit.min(100);
    let next_token;
    let mut resp = MessagesResBody::default();
    // Senders whose member events must be included for lazy-loading clients.
    let mut lazy_loaded = HashSet::new();
    match args.dir {
        Direction::Forward => {
            let events = topolo::load_pdus_forward(
                Some(sender_id),
                &args.room_id,
                Some(from_tk),
                until_tk,
                Some(&args.filter),
                limit,
            )?;
            for (_, event) in &events {
                /* TODO: Remove this when these are resolved:
                 * https://github.com/vector-im/element-android/issues/3417
                 * https://github.com/vector-im/element-web/issues/21034
                if !crate::room::lazy_loading.lazy_load_was_sent_before(
                    sender_id,
                    sender_id,
                    &body.room_id,
                    &event.sender,
                )? {
                    lazy_loaded.insert(event.sender.clone());
                }
                */
                lazy_loaded.insert(event.sender.clone());
            }
            next_token = events.last().map(|(_, pdu)| pdu.live_token());
            let events: Vec<_> = events
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();
            resp.start = from_tk.to_string();
            resp.end = next_token.map(|tk| tk.to_string());
            resp.chunk = events;
        }
        Direction::Backward => {
            let mut events: indexmap::IndexMap<i64, crate::SnPduEvent> =
                topolo::load_pdus_backward(
                    Some(sender_id),
                    &args.room_id,
                    Some(from_tk),
                    until_tk,
                    Some(&args.filter),
                    limit,
                )?;
            // If backfill fetched anything new over federation, re-run the
            // local query so the response includes the backfilled events.
            let filled_events =
                timeline::backfill_if_required(&args.room_id, &from_tk, &events, limit).await?;
            if !filled_events.is_empty() {
                events = topolo::load_pdus_backward(
                    Some(sender_id),
                    &args.room_id,
                    Some(from_tk),
                    until_tk,
                    Some(&args.filter),
                    limit,
                )?;
            }
            for (_, event) in &events {
                /* TODO: Remove this when these are resolved:
                 * https://github.com/vector-im/element-android/issues/3417
                 * https://github.com/vector-im/element-web/issues/21034
                if !crate::room::lazy_loading.lazy_load_was_sent_before(
                    sender_id,
                    authed.device_id(),
                    &args.room_id,
                    &event.sender,
                )? {
                    lazy_loaded.insert(event.sender.clone());
                }
                */
                lazy_loaded.insert(event.sender.clone());
            }
            next_token = events.last().map(|(_, pdu)| pdu.prev_historic_token());
            resp.start = from_tk.to_string();
            resp.end = next_token.map(|tk| tk.to_string());
            resp.chunk = events.values().map(|pdu| pdu.to_room_event()).collect();
        }
    }
    // Attach member state events for every sender seen in the chunk.
    resp.state = Vec::new();
    for ll_id in &lazy_loaded {
        if let Ok(member_event) = room::get_state(
            &args.room_id,
            &StateEventType::RoomMember,
            ll_id.as_str(),
            None,
        ) {
            resp.state.push(member_event.to_state_event());
        }
    }
    // TODO: enable again when we are sure clients can handle it
    /*
    if let Some(next_token) = next_token {
        crate::room::lazy_loading.lazy_load_mark_sent(
            authed.user_id(),
            authed.device_id(),
            &body.room_id,
            lazy_loaded,
            next_token,
        );
    }
    */
    json_ok(resp)
}
/// #PUT /_matrix/client/r0/rooms/{room_id}/send/{event_type}/{txn_id}
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The content must be a valid JSON object (same rule as the txn-less endpoint)
/// - Tries to send the event into the room, auth rules will determine if it is allowed
#[endpoint]
pub(super) async fn send_message(
    _aa: AuthArgs,
    args: CreateMessageWithTxnReqArgs,
    req: &mut Request,
    depot: &mut Depot,
) -> JsonResult<SendMessageResBody> {
    let authed = depot.authed_info()?;
    let conf = config::get();
    // Forbid m.room.encrypted if encryption is disabled
    if TimelineEventType::RoomEncrypted == args.event_type.to_string().into()
        && !conf.allow_encryption
    {
        return Err(MatrixError::forbidden("Encryption has been disabled", None).into());
    }

    let payload = req.payload().await?;
    // Parse the body once and require a JSON object, consistent with
    // `post_message` (event content is an object per the client-server spec).
    let content: JsonValue =
        serde_json::from_slice(payload).map_err(|_| MatrixError::bad_json("invalid json body"))?;
    if !content.is_object() {
        return Err(MatrixError::bad_json("json body is not object").into());
    }

    let state_lock = room::lock_state(&args.room_id).await;
    // Transaction-id replay: if this txn was already handled, return the
    // previously created event id instead of appending a duplicate event.
    if let Some(event_id) = crate::transaction_id::get_event_id(
        &args.txn_id,
        authed.user_id(),
        Some(authed.device_id()),
        Some(&args.room_id),
    )? {
        return json_ok(SendMessageResBody::new(event_id));
    }

    let mut unsigned = BTreeMap::new();
    unsigned.insert(
        "transaction_id".to_owned(),
        to_raw_value(&args.txn_id).expect("TxnId is valid json"),
    );

    let event_id = timeline::build_and_append_pdu(
        PduBuilder {
            event_type: args.event_type.to_string().into(),
            // Reuse the already-parsed value instead of parsing the payload twice.
            content: to_raw_value(&content).expect("content is valid json"),
            unsigned,
            // Only appservices may backdate events.
            timestamp: if authed.appservice().is_some() {
                args.timestamp
            } else {
                None
            },
            ..Default::default()
        },
        authed.user_id(),
        &args.room_id,
        &crate::room::get_version(&args.room_id)?,
        &state_lock,
    )
    .await?
    .pdu
    .event_id;

    // Record the txn id so retries of this request are idempotent.
    crate::transaction_id::add_txn_id(
        &args.txn_id,
        authed.user_id(),
        Some(authed.device_id()),
        Some(&args.room_id),
        Some(&event_id),
    )?;
    json_ok(SendMessageResBody::new((*event_id).to_owned()))
}
/// #POST /_matrix/client/r0/rooms/{room_id}/send/{event_type}
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
#[endpoint]
pub(super) async fn post_message(
_aa: AuthArgs,
args: CreateMessageReqArgs,
req: &mut Request,
depot: &mut Depot,
) -> JsonResult<SendMessageResBody> {
let authed = depot.authed_info()?;
let conf = config::get();
let state_lock = room::lock_state(&args.room_id).await;
// Forbid m.room.encrypted if encryption is disabled
if TimelineEventType::RoomEncrypted == args.event_type.to_string().into()
&& !conf.allow_encryption
{
return Err(MatrixError::forbidden("Encryption has been disabled", None).into());
}
let payload = req.payload().await?;
// Ensure it's valid JSON.
let content: JsonValue =
serde_json::from_slice(payload).map_err(|_| MatrixError::bad_json("invalid json body"))?;
if !content.is_object() {
return Err(MatrixError::bad_json("json body is not object").into());
}
let event_id = timeline::build_and_append_pdu(
PduBuilder {
event_type: args.event_type.to_string().into(),
content: serde_json::from_slice(payload)
.map_err(|_| MatrixError::bad_json("invalid json body"))?,
unsigned: BTreeMap::new(),
..Default::default()
},
authed.user_id(),
&args.room_id,
&crate::room::get_version(&args.room_id)?,
&state_lock,
)
.await?
.pdu
.event_id;
json_ok(SendMessageResBody::new((*event_id).to_owned()))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/room/thread.rs | crates/server/src/routing/client/room/thread.rs | use salvo::prelude::*;
use crate::core::client::room::{ThreadsReqArgs, ThreadsResBody};
use crate::room::state;
use crate::{AuthArgs, DepotExt, JsonResult, json_ok};
/// #GET /_matrix/client/r0/rooms/{room_id}/threads
///
/// Lists thread roots in the room, newest first, excluding events the
/// requesting user is not allowed to see.
#[endpoint]
pub(super) async fn list_threads(
    _aa: AuthArgs,
    args: ThreadsReqArgs,
    depot: &mut Depot,
) -> JsonResult<ThreadsResBody> {
    let authed = depot.authed_info()?;
    // Use limit or else 10, with maximum 100
    let limit = args
        .limit
        .and_then(|l| l.try_into().ok())
        .unwrap_or(10)
        .min(100);
    // `from` is a pagination token produced by a previous call.
    let from: Option<i64> = if let Some(from) = &args.from {
        Some(from.parse()?)
    } else {
        None
    };

    let (events, next_batch) =
        crate::room::thread::get_threads(&args.room_id, &args.include, limit, from)?;

    // Filter invisible thread roots and convert in a single pass — the
    // previous intermediate Vec was collected only to be re-iterated.
    let chunk = events
        .into_iter()
        .filter(|(_, pdu)| {
            state::user_can_see_event(authed.user_id(), &pdu.event_id).unwrap_or(false)
        })
        .map(|(_, pdu)| pdu.to_room_event())
        .collect();

    json_ok(ThreadsResBody {
        chunk,
        next_batch: next_batch.map(|b| b.to_string()),
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/directory/alias.rs | crates/server/src/routing/client/directory/alias.rs | use diesel::prelude::*;
use salvo::oapi::extract::{JsonBody, PathParam};
use salvo::prelude::*;
use crate::core::client::room::{AliasResBody, SetAliasReqBody};
use crate::core::identifiers::*;
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::exts::*;
use crate::{AuthArgs, EmptyResult, JsonResult, MatrixError, empty_ok, json_ok};
/// #GET /_matrix/client/r0/directory/room/{room_alias}
/// Resolve an alias locally or over federation.
///
/// - TODO: Suggest more servers to join via
#[endpoint]
pub(super) async fn get_alias(
    _aa: AuthArgs,
    room_alias: PathParam<OwnedRoomAliasId>,
) -> JsonResult<AliasResBody> {
    let room_alias = room_alias.into_inner();
    // Resolution may involve a federation round-trip for remote aliases.
    let (room_id, servers) = match crate::room::resolve_alias(&room_alias, None).await {
        Ok(resolved) => resolved,
        Err(_) => return Err(MatrixError::not_found("Room with alias not found.").into()),
    };
    let servers = crate::room::room_available_servers(&room_id, &room_alias, servers).await?;
    debug!(?room_alias, ?room_id, "available servers: {servers:?}");
    json_ok(AliasResBody::new(room_id, servers))
}
/// #PUT /_matrix/client/r0/directory/room/{room_alias}
/// Creates a new room alias on this server.
#[endpoint]
pub(super) async fn upsert_alias(
    _aa: AuthArgs,
    room_alias: PathParam<OwnedRoomAliasId>,
    body: JsonBody<SetAliasReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let alias_id = room_alias.into_inner();

    // Only aliases on our own server can be created here.
    if alias_id.is_remote() {
        return Err(MatrixError::invalid_param("alias is from another server").into());
    }
    // Reject when the alias already resolves locally.
    if crate::room::resolve_local_alias(&alias_id).is_ok() {
        return Err(MatrixError::forbidden("alias already exists", None).into());
    }
    // Reject when an alias row exists but points at a different room.
    let conflict = room_aliases::table
        .filter(room_aliases::alias_id.eq(&alias_id))
        .filter(room_aliases::room_id.ne(&body.room_id));
    if diesel_exists!(conflict, &mut connect()?)? {
        return Err(StatusError::conflict()
            .brief("a room alias with that name already exists")
            .into());
    }

    crate::room::set_alias(body.room_id.clone(), alias_id, authed.user_id())?;
    empty_ok()
}
/// #DELETE /_matrix/client/r0/directory/room/{room_alias}
/// Deletes a room alias from this server.
///
/// - TODO: additional access control checks
/// - TODO: Update canonical alias event
#[endpoint]
pub(super) async fn delete_alias(
    _aa: AuthArgs,
    room_alias: PathParam<OwnedRoomAliasId>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let room_alias = room_alias.into_inner();
    // Only aliases owned by this homeserver can be removed here.
    match room_alias.is_remote() {
        true => Err(MatrixError::invalid_param("Alias is from another server.").into()),
        false => {
            crate::room::remove_alias(&room_alias, authed.user()).await?;
            // TODO: update alt_aliases?
            empty_ok()
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/directory/room.rs | crates/server/src/routing/client/directory/room.rs | use diesel::prelude::*;
use salvo::oapi::extract::{JsonBody, PathParam};
use salvo::prelude::*;
use crate::AuthArgs;
use crate::core::client::directory::SetRoomVisibilityReqBody;
use crate::core::client::directory::VisibilityResBody;
use crate::core::identifiers::*;
use crate::core::room::Visibility;
use crate::data::room::DbRoom;
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::{EmptyResult, JsonResult, empty_ok, json_ok};
/// #GET /_matrix/client/r0/directory/list/room/{room_id}
/// Gets the visibility of a given room in the room directory.
#[endpoint]
pub(super) async fn get_visibility(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
) -> JsonResult<VisibilityResBody> {
    let room_id = room_id.into_inner();
    // A room is "public" iff a row exists with the public flag set.
    let is_public = diesel_exists!(
        rooms::table
            .filter(rooms::id.eq(&room_id))
            .filter(rooms::is_public.eq(true)),
        &mut connect()?
    )?;
    let visibility = if is_public {
        Visibility::Public
    } else {
        Visibility::Private
    };
    json_ok(VisibilityResBody { visibility })
}
/// #PUT /_matrix/client/r0/directory/list/room/{room_id}
/// Sets the visibility of a given room in the room directory.
///
/// - TODO: Access control checks
#[endpoint]
pub(super) async fn set_visibility(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<SetRoomVisibilityReqBody>,
) -> EmptyResult {
    let room_id = room_id.into_inner();
    let mut conn = connect()?;
    // Loading the row first ensures a NotFound error for unknown rooms.
    let room = rooms::table.find(&room_id).first::<DbRoom>(&mut conn)?;
    let make_public = body.visibility == Visibility::Public;
    diesel::update(&room)
        .set(rooms::is_public.eq(make_public))
        .execute(&mut conn)?;
    empty_ok()
}
/// Placeholder for setting directory visibility scoped to a third-party
/// network; currently accepts the request and does nothing.
#[endpoint]
pub(super) async fn set_visibility_with_network_id(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement per-network room directory visibility
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/user/filter.rs | crates/server/src/routing/client/user/filter.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::filter::{CreateFilterReqBody, CreateFilterResBody, FilterResBody};
use crate::{AuthArgs, DepotExt, JsonResult, data, json_ok};
/// #GET /_matrix/client/r0/user/{user_id}/filter/{filter_id}
/// Loads a filter that was previously created.
///
/// - A user can only access their own filters
#[endpoint]
pub(super) fn get_filter(
    _aa: AuthArgs,
    filter_id: PathParam<i64>,
    depot: &mut Depot,
) -> JsonResult<FilterResBody> {
    let authed = depot.authed_info()?;
    // Filters are scoped to the authenticated user; the id alone is not enough.
    let filter_id = filter_id.into_inner();
    let filter = crate::data::user::get_filter(authed.user_id(), filter_id)?;
    json_ok(FilterResBody::new(filter))
}
/// #POST /_matrix/client/r0/user/{user_id}/filter
/// Creates a new filter to be used by other endpoints.
#[endpoint]
pub(super) fn create_filter(
    _aa: AuthArgs,
    body: JsonBody<CreateFilterReqBody>,
    depot: &mut Depot,
) -> JsonResult<CreateFilterResBody> {
    let authed = depot.authed_info()?;
    // Persist the filter and hand the generated id back to the client.
    let new_id = data::user::create_filter(authed.user_id(), &body.filter)?;
    json_ok(CreateFilterResBody::new(new_id.to_string()))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/user/openid.rs | crates/server/src/routing/client/user/openid.rs | use salvo::prelude::*;
use crate::{AuthArgs, EmptyResult, empty_ok};
/// Placeholder for the OpenID token request endpoint; currently returns an
/// empty success response without issuing a token.
#[endpoint]
pub(super) async fn request_token(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement OpenID token issuance
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/user/room.rs | crates/server/src/routing/client/user/room.rs | use salvo::prelude::*;
use crate::{AuthArgs, EmptyResult, empty_ok};
/// Placeholder for the mutual-rooms endpoint; currently returns an empty
/// success response instead of the shared room list.
#[endpoint]
pub(super) async fn get_mutual_rooms(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement mutual rooms lookup
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/user/account.rs | crates/server/src/routing/client/user/account.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde::Deserialize;
use crate::core::client::account::data::{GlobalAccountDataResBody, RoomAccountDataResBody};
use crate::core::events::AnyGlobalAccountDataEventContent;
use crate::core::identifiers::*;
use crate::core::serde::{JsonValue, RawJson};
use crate::core::user::{UserEventTypeReqArgs, UserRoomEventTypeReqArgs};
use crate::{
AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, OptionalExtension, data, empty_ok,
json_ok,
};
/// Helper for deserializing only the `content` field of a global
/// account-data event.
// NOTE(review): not referenced anywhere else in this file — confirm whether
// it is still needed or can be removed.
#[derive(Deserialize)]
struct ExtractGlobalEventContent {
    content: RawJson<AnyGlobalAccountDataEventContent>,
}
/// #GET /_matrix/client/r0/user/{user_id}/account_data/{event_type}
/// Gets some account data for the sender user.
#[endpoint]
pub(super) async fn get_global_data(
    _aa: AuthArgs,
    args: UserEventTypeReqArgs,
    depot: &mut Depot,
) -> JsonResult<GlobalAccountDataResBody> {
    let authed = depot.authed_info()?;
    let event_type = args.event_type.to_string();
    // Missing account data is a 404, not an internal error.
    let content = data::user::get_data::<JsonValue>(authed.user_id(), None, &event_type)
        .map_err(|_| MatrixError::not_found("user data not found"))?;
    json_ok(GlobalAccountDataResBody(RawJson::from_value(&content)?))
}
/// #PUT /_matrix/client/r0/user/{user_id}/account_data/{event_type}
/// Sets some account data for the sender user.
#[endpoint]
pub(super) async fn set_global_data(
    _aa: AuthArgs,
    args: UserEventTypeReqArgs,
    body: JsonBody<JsonValue>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    let event_type = args.event_type.to_string();
    let body = body.into_inner();
    // `m.ignored_user_list` is additionally mirrored into the ignore table so
    // it can be queried without parsing account data.
    if event_type == "m.ignored_user_list" {
        let mut ignored_ids: Vec<OwnedUserId> = Vec::new();
        if let Some(map) = body.get("ignored_users").and_then(|v| v.as_object()) {
            for (id, _) in map {
                // Entries that are not valid user ids are silently skipped.
                if let Ok(user_id) = OwnedUserId::try_from(id.clone()) {
                    ignored_ids.push(user_id);
                }
            }
        }
        data::user::set_ignored_users(authed.user_id(), &ignored_ids)?;
    }
    data::user::set_data(authed.user_id(), None, &event_type, body)?;
    empty_ok()
}
/// #GET /_matrix/client/r0/user/{user_id}/rooms/{roomId}/account_data/{event_type}
/// Gets some account data for the sender user.
#[endpoint]
pub(super) async fn get_room_data(
    _aa: AuthArgs,
    args: UserRoomEventTypeReqArgs,
    depot: &mut Depot,
) -> JsonResult<RoomAccountDataResBody> {
    let authed = depot.authed_info()?;
    let event_type = args.event_type.to_string();
    // Room-scoped lookup; absence is reported as a 404.
    let content =
        data::user::get_data::<JsonValue>(authed.user_id(), Some(&*args.room_id), &event_type)
            .map_err(|_| MatrixError::not_found("user data not found"))?;
    json_ok(RoomAccountDataResBody(RawJson::from_value(&content)?))
}
/// #PUT /_matrix/client/r0/user/{user_id}/account_data/{event_type}
/// Sets some room account data for the sender user.
#[endpoint]
pub(super) async fn set_room_data(
    _aa: AuthArgs,
    args: UserRoomEventTypeReqArgs,
    body: JsonBody<JsonValue>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Store the raw JSON payload under the (user, room, event_type) key.
    data::user::set_data(
        authed.user_id(),
        Some(args.room_id),
        &args.event_type.to_string(),
        body.into_inner(),
    )?;
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/key/signature.rs | crates/server/src/routing/client/key/signature.rs | use std::collections::BTreeMap;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::key::{UploadSignaturesReqBody, UploadSignaturesResBody};
use crate::{AuthArgs, DepotExt, JsonResult, MatrixError, json_ok};
/// #POST /_matrix/client/r0/keys/signatures/upload
/// Uploads end-to-end key signatures from the sender user.
///
/// For every uploaded key, only the signatures made *by the sender* are
/// accepted; each `(signature name, value)` pair is stored via
/// `crate::user::sign_key`.
#[endpoint]
pub(super) async fn upload(
    _aa: AuthArgs,
    body: JsonBody<UploadSignaturesReqBody>,
    depot: &mut Depot,
) -> JsonResult<UploadSignaturesResBody> {
    let authed = depot.authed_info()?;
    let body = body.into_inner();
    for (user_id, keys) in &body.0 {
        for (key_id, key) in keys {
            let key = serde_json::to_value(key)
                .map_err(|_| MatrixError::invalid_param("Invalid key JSON"))?;
            // Lazily-built errors (`ok_or_else`) avoid constructing them on the
            // success path; iterating by reference avoids cloning the whole
            // signatures map.
            let signatures = key
                .get("signatures")
                .ok_or_else(|| MatrixError::invalid_param("Missing signatures field."))?
                .get(authed.user_id().as_str())
                .ok_or_else(|| {
                    MatrixError::invalid_param("Invalid user in signatures field.")
                })?
                .as_object()
                .ok_or_else(|| MatrixError::invalid_param("Invalid signature."))?;
            for (sig_name, sig_value) in signatures {
                // Signature validation?
                let sig_value = sig_value
                    .as_str()
                    .ok_or_else(|| MatrixError::invalid_param("Invalid signature value."))?
                    .to_owned();
                crate::user::sign_key(
                    user_id,
                    key_id,
                    (sig_name.clone(), sig_value),
                    authed.user_id(),
                )?;
            }
        }
    }
    json_ok(UploadSignaturesResBody {
        failures: BTreeMap::new(), // TODO: integrate
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/key/device_signing.rs | crates/server/src/routing/client/key/device_signing.rs | use salvo::prelude::*;
use crate::core::client::key::UploadSigningKeysReqBody;
use crate::core::client::uiaa::{AuthFlow, AuthType, UiaaInfo};
use crate::core::serde::CanonicalJsonValue;
use crate::{AuthArgs, DepotExt, EmptyResult, MatrixError, SESSION_ID_LENGTH, empty_ok, utils};
/// #POST /_matrix/client/r0/keys/device_signing/upload
/// Uploads end-to-end key information for the sender user.
///
/// - Requires UIAA to verify password
#[endpoint]
pub(super) async fn upload(_aa: AuthArgs, req: &mut Request, depot: &mut Depot) -> EmptyResult {
    let authed = depot.authed_info()?;
    let sender_id = authed.user_id();
    let payload = req.payload().await?;
    // UIAA: single-stage password flow; `session` is only filled in when the
    // client actually has to be challenged.
    let mut uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };
    let body = serde_json::from_slice::<UploadSigningKeysReqBody>(payload);
    let none_auth = body.as_ref().map(|b| b.auth.is_none()).unwrap_or(true);
    // UIA is demanded only when no `auth` was supplied AND the request would
    // replace cross-signing keys that already exist with different ones.
    let uia_required = if none_auth {
        let exist_master_key = crate::user::key::get_master_key(sender_id)?;
        let exist_self_signing_key = crate::user::key::get_self_signing_key(sender_id)?;
        let exist_user_signing_key = crate::user::key::get_user_signing_key(sender_id)?;
        if exist_master_key.is_none()
            && exist_self_signing_key.is_none()
            && exist_user_signing_key.is_none()
        {
            // First-time upload: no re-authentication needed.
            false
        } else {
            // Re-auth only if any of the three keys would actually change.
            exist_master_key.as_ref()
                != body.as_ref().map(|b| b.master_key.as_ref()).unwrap_or(None)
                || exist_self_signing_key.as_ref()
                    != body
                        .as_ref()
                        .map(|b| b.self_signing_key.as_ref())
                        .unwrap_or(None)
                || exist_user_signing_key.as_ref()
                    != body
                        .as_ref()
                        .map(|b| b.user_signing_key.as_ref())
                        .unwrap_or(None)
        }
    } else {
        false
    };
    // Unparseable body or required UIA: open a UIAA session and challenge the
    // client (401 with flows), or reject outright if the payload is not JSON.
    if body.is_err() || uia_required {
        if let Ok(json) = serde_json::from_slice::<CanonicalJsonValue>(payload) {
            uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
            crate::uiaa::create_session(sender_id, authed.device_id(), &uiaa_info, json)?;
            return Err(uiaa_info.into());
        } else {
            return Err(MatrixError::not_json("no json body was sent when required").into());
        }
    };
    let body = body.expect("body should be ok");
    // NOTE(review): control only reaches here when `body` is Ok and
    // `uia_required` is false (both other cases returned above), so this
    // branch is unreachable and `try_auth` is never invoked — confirm whether
    // a provided `auth` should be verified here instead.
    if uia_required {
        let Some(auth) = &body.auth else {
            return Err(MatrixError::not_json("auth is none should not happend").into());
        };
        crate::uiaa::try_auth(sender_id, authed.device_id(), auth, &uiaa_info)?;
    }
    if let Some(master_key) = &body.master_key {
        crate::user::add_cross_signing_keys(
            sender_id,
            master_key,
            &body.self_signing_key,
            &body.user_signing_key,
            true, // notify so that other users see the new keys
        )?;
    }
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/account/password.rs | crates/server/src/routing/client/account/password.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use palpo_core::client::account::ChangePasswordReqBody;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::client::uiaa::{AuthFlow, AuthType, UiaaInfo};
use crate::data::connect;
use crate::data::schema::*;
use crate::exts::*;
use crate::{AuthArgs, EmptyResult, SESSION_ID_LENGTH, empty_ok, hoops, utils};
/// Routes for `/account/password`; rate-limited because password changes are
/// a brute-force target.
pub fn authed_router() -> Router {
    let router = Router::with_path("password").hoop(hoops::limit_rate);
    router.post(change_password)
}
/// #POST /_matrix/client/r0/account/password
/// Changes the password of this account.
///
/// - Requires UIAA to verify user password
/// - Changes the password of the sender user
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
///   not saved
///
/// If logout_devices is true it does the following for each device except the sender device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
#[endpoint]
async fn change_password(
    req: &mut Request,
    _aa: AuthArgs,
    body: JsonBody<ChangePasswordReqBody>,
    depot: &mut Depot,
) -> EmptyResult {
    let authed = depot.authed_info()?;
    // Single-stage password UIAA flow.
    let mut uiaa_info = UiaaInfo {
        flows: vec![AuthFlow {
            stages: vec![AuthType::Password],
        }],
        completed: Vec::new(),
        params: Default::default(),
        session: None,
        auth_error: None,
    };
    // No auth supplied: challenge the client with a fresh UIAA session (401).
    let Some(auth) = &body.auth else {
        uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
        return Err(uiaa_info.into());
    };
    // NOTE(review): a failed `try_auth` answers with a *fresh* session and
    // discards the auth error details — confirm this is intended rather than
    // propagating the error from `try_auth`.
    if crate::uiaa::try_auth(authed.user_id(), authed.device_id(), auth, &uiaa_info).is_err() {
        uiaa_info.session = Some(utils::random_string(SESSION_ID_LENGTH));
        return Err(uiaa_info.into());
    }
    crate::user::set_password(authed.user_id(), &body.new_password)?;
    // Prune pushers registered by other access tokens. NOTE(review): this runs
    // regardless of `logout_devices` — confirm that is the intended behavior.
    if let Some(access_token_id) = authed.access_token_id() {
        diesel::delete(
            user_pushers::table
                .filter(user_pushers::user_id.eq(authed.user_id()))
                .filter(user_pushers::access_token_id.ne(access_token_id)),
        )
        .execute(&mut connect()?)?;
    }
    if body.logout_devices {
        // Logout all devices except the current one
        diesel::delete(
            user_devices::table
                .filter(user_devices::user_id.eq(authed.user_id()))
                .filter(user_devices::device_id.ne(authed.device_id())),
        )
        .execute(&mut connect()?)?;
        // Revoke their access tokens ...
        diesel::delete(
            user_access_tokens::table
                .filter(user_access_tokens::user_id.eq(authed.user_id()))
                .filter(user_access_tokens::device_id.ne(authed.device_id())),
        )
        .execute(&mut connect()?)?;
        // ... and their refresh tokens, so the sessions cannot be renewed.
        diesel::delete(
            user_refresh_tokens::table
                .filter(user_refresh_tokens::user_id.eq(authed.user_id()))
                .filter(user_refresh_tokens::device_id.ne(authed.device_id())),
        )
        .execute(&mut connect()?)?;
    }
    info!("User {} changed their password.", authed.user_id());
    // crate::admin::send_message(RoomMessageEventContent::notice_plain(format!("User {user} changed their password.")));
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/client/account/threepid.rs | crates/server/src/routing/client/account/threepid.rs | //! `POST /_matrix/client/*/account/3pid/add`
//!
//! Add contact information to a user's account
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3account3pidadd
use salvo::prelude::*;
use crate::core::client::account::threepid::ThreepidsResBody;
use crate::{AuthArgs, EmptyResult, JsonResult, empty_ok, json_ok};
pub fn authed_router() -> Router {
Router::with_path("3pid")
.get(get)
// 1.0 => "/_matrix/client/r0/account/3pid/add",
// 1.1 => "/_matrix/client/v3/account/3pid/add",
// 1.0 => "/_matrix/client/r0/account/3pid/bind",
// 1.1 => "/_matrix/client/v3/account/3pid/bind",
.push(Router::with_path("add").post(add))
.push(Router::with_path("bind").post(bind))
.push(Router::with_path("delete").post(delete))
}
/// #GET _matrix/client/v3/account/3pid
/// Get a list of third party identifiers associated with this account.
///
/// - Currently always returns empty list
#[endpoint]
async fn get(_aa: AuthArgs) -> JsonResult<ThreepidsResBody> {
    // TODO: return the user's stored third-party identifiers once 3PIDs are persisted
    json_ok(ThreepidsResBody::new(Vec::new()))
}
/// Placeholder: 3PID `add` is not implemented yet; returns empty success.
#[endpoint]
async fn add(_aa: AuthArgs) -> EmptyResult {
    // TODO: later
    empty_ok()
}
/// Placeholder: 3PID `bind` is not implemented yet; returns empty success.
#[endpoint]
async fn bind(_aa: AuthArgs) -> EmptyResult {
    // TODO: later
    empty_ok()
}
/// Placeholder: 3PID `unbind` is not implemented yet; returns empty success.
#[endpoint]
async fn unbind(_aa: AuthArgs) -> EmptyResult {
    // TODO: later
    empty_ok()
}
/// Placeholder: 3PID `delete` is not implemented yet; returns empty success.
#[endpoint]
async fn delete(_aa: AuthArgs) -> EmptyResult {
    // TODO: later
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/user.rs | crates/server/src/routing/federation/user.rs | //! Endpoints for handling keys for end-to-end encryption
use diesel::prelude::*;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::AuthArgs;
use crate::core::federation::device::{Device, DevicesResBody};
use crate::core::federation::key::{
ClaimKeysReqBody, ClaimKeysResBody, QueryKeysReqBody, QueryKeysResBody,
};
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::schema::*;
use crate::{AppError, CjsonResult, DepotExt, JsonResult, cjson_ok, data, json_ok};
/// Federation `/user` routes: key claim/query plus the per-user device list.
pub fn router() -> Router {
    let keys = Router::with_path("keys")
        .push(Router::with_path("claim").post(claim_keys))
        .push(Router::with_path("query").post(query_keys));
    Router::with_path("user")
        .push(keys)
        .push(Router::with_path("devices/{user_id}").get(get_devices))
}
/// #POST /_matrix/federation/v1/user/keys/claim
/// Claims one-time keys.
#[endpoint]
async fn claim_keys(
    _aa: AuthArgs,
    body: JsonBody<ClaimKeysReqBody>,
) -> CjsonResult<ClaimKeysResBody> {
    // Delegate to the shared claim logic and repack only the claimed keys.
    let claimed = crate::user::claim_one_time_keys(&body.one_time_keys).await?;
    let one_time_keys = claimed.one_time_keys;
    cjson_ok(ClaimKeysResBody { one_time_keys })
}
/// #POST /_matrix/federation/v1/user/keys/query
/// Gets devices and identity keys for the given users.
#[endpoint]
async fn query_keys(
    _aa: AuthArgs,
    body: JsonBody<QueryKeysReqBody>,
    depot: &mut Depot,
) -> CjsonResult<QueryKeysResBody> {
    let origin = depot.origin()?;
    // Only expose cross-signing keys of users belonging to the asking server.
    let result =
        crate::user::query_keys(None, &body.device_keys, |u| u.server_name() == origin, false)
            .await?;
    cjson_ok(QueryKeysResBody {
        device_keys: result.device_keys,
        master_keys: result.master_keys,
        self_signing_keys: result.self_signing_keys,
    })
}
/// #GET /_matrix/federation/v1/user/devices/{user_id}
/// Gets information on all devices of the user.
///
/// Improvements over the previous version: a single pooled DB connection is
/// reused for both queries, the device vector is pre-sized, error values are
/// built lazily, and a redundant `user_id` clone is removed.
#[endpoint]
fn get_devices(
    _aa: AuthArgs,
    user_id: PathParam<OwnedUserId>,
    depot: &mut Depot,
) -> JsonResult<DevicesResBody> {
    let origin = depot.origin()?;
    let user_id = user_id.into_inner();
    let mut conn = connect()?;
    // Latest device-list stream position for this user; 0 when none recorded.
    let stream_id = device_streams::table
        .filter(device_streams::user_id.eq(&user_id))
        .select(device_streams::id)
        .order_by(device_streams::id.desc())
        .first::<i64>(&mut conn)
        .optional()?
        .unwrap_or_default();

    let devices_and_names = user_devices::table
        .filter(user_devices::user_id.eq(&user_id))
        .select((user_devices::device_id, user_devices::display_name))
        .load::<(OwnedDeviceId, Option<String>)>(&mut conn)?;
    let mut devices = Vec::with_capacity(devices_and_names.len());
    for (device_id, display_name) in devices_and_names {
        devices.push(Device {
            keys: data::user::get_device_keys_and_sigs(&user_id, &device_id)?
                .ok_or_else(|| AppError::public("server keys not found"))?,
            device_id,
            device_display_name: display_name,
        })
    }
    json_ok(DevicesResBody {
        stream_id: stream_id as u64,
        devices,
        // Cross-signing keys are only disclosed to the user's own server.
        master_key: crate::user::get_allowed_master_key(Some(&user_id), &user_id, &|u| {
            u.server_name() == origin
        })?,
        self_signing_key: crate::user::get_allowed_self_signing_key(
            Some(&user_id),
            &user_id,
            &|u| u.server_name() == origin,
        )?,
        user_id,
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/event.rs | crates/server/src/routing/federation/event.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::federation::authorization::{EventAuthReqArgs, EventAuthResBody};
use crate::core::federation::event::{
EventReqArgs, EventResBody, MissingEventsReqBody, MissingEventsResBody,
};
use crate::core::identifiers::*;
use crate::core::room::{TimestampToEventReqArgs, TimestampToEventResBody};
use crate::data::room::DbEvent;
use crate::room::{state, timeline};
use crate::{
AppError, AuthArgs, DepotExt, EmptyResult, JsonResult, MatrixError, config, empty_ok, json_ok,
};
/// Federation event routes: single-event fetch, auth chains, timestamp lookup,
/// missing-event backfill and third-party invite exchange.
pub fn router() -> Router {
    let exchange = Router::with_path("exchange_third_party_invite/{room_id}")
        .put(exchange_third_party_invite);
    Router::new()
        .push(Router::with_path("event/{event_id}").get(get_event))
        .push(Router::with_path("event_auth/{room_id}/{event_id}").get(auth_chain))
        .push(Router::with_path("timestamp_to_event/{room_id}").get(timestamp_to_event))
        .push(Router::with_path("get_missing_events/{room_id}").post(missing_events))
        .push(exchange)
}
/// #GET /_matrix/federation/v1/event/{event_id}
/// Retrieves a single event from the server.
///
/// - Only works if a user of this server is currently invited or joined the room
#[endpoint]
fn get_event(_aa: AuthArgs, args: EventReqArgs, depot: &mut Depot) -> JsonResult<EventResBody> {
    let origin = depot.origin()?;
    // Rejected events are hidden as if they did not exist.
    if DbEvent::get_by_id(&args.event_id)?.rejection_reason.is_some() {
        warn!("event {} is rejected, returning 404", &args.event_id);
        return Err(MatrixError::not_found("event not found").into());
    }
    let Some(event_json) = timeline::get_pdu_json(&args.event_id)? else {
        warn!("event not found, event id: {:?}", &args.event_id);
        return Err(MatrixError::not_found("event not found").into());
    };
    // Derive the room from the stored event itself, then gate on access.
    let room_id: &RoomId = event_json
        .get("room_id")
        .and_then(|val| val.as_str())
        .ok_or_else(|| AppError::internal("invalid event in database"))?
        .try_into()
        .map_err(|_| AppError::internal("invalid room id field in event in database"))?;
    crate::federation::access_check(origin, room_id, Some(&args.event_id))?;
    json_ok(EventResBody {
        origin: config::get().server_name.to_owned(),
        origin_server_ts: UnixMillis::now(),
        pdu: crate::sending::convert_to_outgoing_federation_event(event_json),
    })
}
/// #GET /_matrix/federation/v1/event_auth/{room_id}/{event_id}
/// Retrieves the auth chain for a given event.
///
/// - This does not include the event itself
#[endpoint]
fn auth_chain(
    _aa: AuthArgs,
    args: EventAuthReqArgs,
    depot: &mut Depot,
) -> JsonResult<EventAuthResBody> {
    let origin = depot.origin()?;
    crate::federation::access_check(origin, &args.room_id, None)?;
    let Some(event) = timeline::get_pdu_json(&args.event_id)? else {
        warn!("event not found, event id: {:?}", &args.event_id);
        return Err(MatrixError::not_found("event not found").into());
    };
    // The room is taken from the stored event, not from the path argument.
    let room_id: &RoomId = event
        .get("room_id")
        .and_then(|val| val.as_str())
        .ok_or_else(|| AppError::internal("invalid event in database"))?
        .try_into()
        .map_err(|_| AppError::internal("invalid room id field in event in database"))?;
    let auth_chain_ids =
        crate::room::auth_chain::get_auth_chain_ids(room_id, [&*args.event_id].into_iter())?;
    // Events that cannot be loaded are silently dropped from the chain.
    let auth_chain = auth_chain_ids
        .into_iter()
        .filter_map(|id| timeline::get_pdu_json(&id).ok().flatten())
        .map(crate::sending::convert_to_outgoing_federation_event)
        .collect();
    json_ok(EventAuthResBody { auth_chain })
}
/// Finds the event closest to the given timestamp in a room, searching in the
/// direction requested by the caller, after verifying the asking server may
/// access the room.
#[endpoint]
async fn timestamp_to_event(
    _aa: AuthArgs,
    args: TimestampToEventReqArgs,
    depot: &mut Depot,
) -> JsonResult<TimestampToEventResBody> {
    let origin = depot.origin()?;
    crate::federation::access_check(origin, &args.room_id, None)?;
    let (event_id, origin_server_ts) =
        crate::event::get_event_for_timestamp(&args.room_id, args.ts, args.dir)?;
    json_ok(TimestampToEventResBody {
        event_id,
        origin_server_ts,
    })
}
/// #POST /_matrix/federation/v1/get_missing_events/{room_id}
/// Retrieves events that the sender is missing.
///
/// Breadth-first walk backwards from `latest_events` along `prev_events`,
/// stopping at `earliest_events` or once `limit` events were collected; the
/// seed events themselves are not returned.
#[endpoint]
fn missing_events(
    _aa: AuthArgs,
    room_id: PathParam<OwnedRoomId>,
    body: JsonBody<MissingEventsReqBody>,
    depot: &mut Depot,
) -> JsonResult<MissingEventsResBody> {
    let origin = depot.origin()?;
    let room_id = room_id.into_inner();
    crate::federation::access_check(origin, &room_id, None)?;
    // `queued_events` is used as a FIFO queue seeded with the latest events;
    // `i` is the read cursor, new prev_events are appended at the back.
    let mut queued_events = body.latest_events.clone();
    let mut events = Vec::new();
    let mut i = 0;
    while i < queued_events.len() && events.len() < body.limit {
        let event_id = queued_events[i].clone();
        if let Some(pdu) = timeline::get_pdu_json(&event_id)? {
            let room_id_str = pdu
                .get("room_id")
                .and_then(|val| val.as_str())
                .ok_or_else(|| AppError::internal("invalid event in database"))?;
            let event_room_id = <&RoomId>::try_from(room_id_str)
                .map_err(|_| AppError::internal("invalid room id field in event in database"))?;
            // Guard against events smuggled in from a different room.
            if event_room_id != room_id {
                warn!(
                    "evil event detected: Event {} found while searching in room {}",
                    event_id, &room_id
                );
                return Err(MatrixError::invalid_param("evil event detected").into());
            }
            // Stop descending past events the caller already has.
            if body.earliest_events.contains(&event_id) {
                i += 1;
                continue;
            }
            // Enqueue this event's prev_events for the next BFS layer.
            queued_events.extend_from_slice(
                &serde_json::from_value::<Vec<OwnedEventId>>(
                    serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
                        AppError::internal("Event in db has no prev_events field.")
                    })?)
                    .expect("canonical json is valid json value"),
                )
                .map_err(|_| AppError::internal("invalid prev_events content in pdu in db::"))?,
            );
            // Indices below `latest_events.len()` are the seeds; only events
            // discovered through prev_events are returned.
            if i >= body.latest_events.len() {
                events.push((
                    event_id,
                    crate::sending::convert_to_outgoing_federation_event(pdu),
                ));
            }
        } else {
            warn!("event not found, event id: {:?}", event_id);
        }
        i += 1;
    }
    // Oldest-first, and only events the asking server is allowed to see.
    let events = events
        .into_iter()
        .rev()
        .filter_map(|(event_id, event)| {
            if state::server_can_see_event(origin, &room_id, &event_id).unwrap_or(false) {
                Some(event)
            } else {
                None
            }
        })
        .collect();
    json_ok(MissingEventsResBody { events })
}
/// Placeholder for `PUT /_matrix/federation/v1/exchange_third_party_invite/{room_id}`;
/// currently accepts and ignores the request.
#[endpoint]
async fn exchange_third_party_invite(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement third-party invite exchange
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/key.rs | crates/server/src/routing/federation/key.rs | //! Endpoints for handling keys for end-to-end encryption
use std::collections::BTreeMap;
use std::time::Duration;
use std::time::SystemTime;
use salvo::prelude::*;
use crate::core::federation::directory::ServerKeysResBody;
use crate::core::federation::discovery::{ServerSigningKeys, VerifyKey};
use crate::core::serde::{Base64, CanonicalJsonObject};
use crate::core::{OwnedServerSigningKeyId, UnixMillis};
use crate::{AuthArgs, EmptyResult, JsonResult, config, empty_ok, json_ok};
/// `/key/v2` routes: batch/per-server key queries and this server's own keys.
pub fn router() -> Router {
    let query = Router::with_path("query")
        .post(query_keys)
        .push(Router::with_path("{server_name}").get(query_keys_from_server));
    Router::with_path("key").oapi_tag("federation").push(
        Router::with_path("v2")
            .push(query)
            .push(Router::with_path("server").get(server_signing_keys)),
    )
}
/// Placeholder for the batch key query (`POST /key/v2/query`); currently
/// returns an empty success response.
#[endpoint]
async fn query_keys(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement batch server-key query
    empty_ok()
}
/// Placeholder for the per-server key query (`GET /key/v2/query/{server_name}`);
/// currently returns an empty success response.
#[endpoint]
async fn query_keys_from_server(_aa: AuthArgs) -> EmptyResult {
    // TODO: implement per-server key query
    empty_ok()
}
/// #GET /_matrix/key/v2/server
/// Gets the public signing keys of this server.
///
/// - Matrix does not support invalidating public keys, so the key returned by this will be valid
///   forever.
// Response type for this endpoint is Json because we need to calculate a signature for the response
#[endpoint]
async fn server_signing_keys(_aa: AuthArgs) -> JsonResult<ServerKeysResBody> {
    let conf = crate::config::get();
    // Advertise the active ed25519 key under `ed25519:<version>`.
    let mut verify_keys: BTreeMap<OwnedServerSigningKeyId, VerifyKey> = BTreeMap::new();
    verify_keys.insert(
        format!("ed25519:{}", config::keypair().version())
            .try_into()
            .expect("found invalid server signing keys in DB"),
        VerifyKey {
            key: Base64::new(config::keypair().public_key().to_vec()),
        },
    );
    let server_keys = ServerSigningKeys {
        server_name: conf.server_name.clone(),
        verify_keys,
        old_verify_keys: BTreeMap::new(),
        signatures: BTreeMap::new(),
        // Advertised as valid for one week from now.
        valid_until_ts: UnixMillis::from_system_time(
            SystemTime::now() + Duration::from_secs(86400 * 7),
        )
        .expect("time is valid"),
    };
    // Canonicalize, sign with this server's key, then convert back into the
    // typed response (now carrying the signature).
    let buf: Vec<u8> = crate::core::serde::json_to_buf(&server_keys)?;
    let mut signed: CanonicalJsonObject = serde_json::from_slice(&buf)?;
    crate::core::signatures::sign_json(conf.server_name.as_str(), config::keypair(), &mut signed)?;
    // Round-trip through `serde_json::Value` instead of bytes; this drops the
    // previous `unwrap()` on re-serialization and one full byte copy.
    let server_keys: ServerSigningKeys = serde_json::from_value(serde_json::to_value(&signed)?)?;
    json_ok(ServerKeysResBody::new(server_keys))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/media.rs | crates/server/src/routing/federation/media.rs | use std::fs;
use std::io::Cursor;
use std::path::Path;
use std::str::FromStr;
use diesel::prelude::*;
use image::imageops::FilterType;
use mime::Mime;
use palpo_core::http_headers::ContentDispositionType;
use salvo::fs::NamedFile;
use salvo::prelude::*;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use crate::core::UnixMillis;
use crate::core::federation::media::*;
use crate::data::connect;
use crate::data::media::*;
use crate::data::schema::*;
use crate::media::get_media_path;
use crate::utils::content_disposition::make_content_disposition;
use crate::{AppResult, AuthArgs, MatrixError, config, hoops};
/// Rate-limited federation media routes for download and thumbnails.
pub fn router() -> Router {
    let mut media = Router::with_path("media").hoop(hoops::limit_rate);
    media = media.push(Router::with_path("download/{media_id}").get(get_content));
    media.push(Router::with_path("thumbnail/{media_id}").get(get_thumbnail))
}
/// #GET /_matrix/media/r0/download/{server_name}/{media_id}
/// Load media from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
#[endpoint]
pub async fn get_content(
    args: ContentReqArgs,
    req: &mut Request,
    res: &mut Response,
) -> AppResult<()> {
    let server_name = &config::get().server_name;
    // Unknown media id and a missing file on disk are reported identically.
    let Some(metadata) = crate::data::media::get_metadata(server_name, &args.media_id)? else {
        return Err(MatrixError::not_yet_uploaded("Media has not been uploaded yet").into());
    };
    let path = get_media_path(server_name, &args.media_id);
    if !Path::new(&path).exists() {
        return Err(MatrixError::not_yet_uploaded("Media has not been uploaded yet").into());
    }
    // Prefer the stored content type; otherwise guess from the file name,
    // falling back to application/octet-stream.
    let content_type = metadata
        .content_type
        .as_deref()
        .and_then(|c| Mime::from_str(c).ok())
        .unwrap_or_else(|| {
            metadata
                .file_name
                .as_ref()
                .map(|name| mime_infer::from_path(name).first_or_octet_stream())
                .unwrap_or(mime::APPLICATION_OCTET_STREAM)
        });
    NamedFile::builder(path)
        .content_type(content_type)
        .send(req.headers(), res)
        .await;
    Ok(())
}
/// # `GET /_matrix/federation/v1/media/thumbnail/{serverName}/{mediaId}`
#[endpoint]
pub async fn get_thumbnail(
_aa: AuthArgs,
args: ThumbnailReqArgs,
_req: &mut Request,
res: &mut Response,
) -> AppResult<()> {
let server_name = &config::get().server_name;
if let Some(DbThumbnail { content_type, .. }) = crate::data::media::get_thumbnail_by_dimension(
server_name,
&args.media_id,
args.width,
args.height,
)? {
let thumb_path = get_media_path(
server_name,
&format!("{}.{}x{}", args.media_id, args.width, args.height),
);
let content_disposition = make_content_disposition(
Some(ContentDispositionType::Inline),
content_type.as_deref(),
None,
);
let content = Content {
file: fs::read(&thumb_path)?,
content_type,
content_disposition: Some(content_disposition),
};
res.render(ThumbnailResBody {
content: FileOrLocation::File(content),
metadata: ContentMetadata::new(),
});
return Ok(());
}
let (width, height, crop) =
crate::media::thumbnail_properties(args.width, args.height).unwrap_or((0, 0, false)); // 0, 0 because that's the original file
let thumb_path = get_media_path(server_name, &format!("{}.{width}x{height}", &args.media_id));
if let Some(DbThumbnail { content_type, .. }) =
crate::data::media::get_thumbnail_by_dimension(server_name, &args.media_id, width, height)?
{
// Using saved thumbnail
let content_disposition = make_content_disposition(
Some(ContentDispositionType::Inline),
content_type.as_deref(),
None,
);
let content = Content {
file: fs::read(&thumb_path)?,
content_type,
content_disposition: Some(content_disposition),
};
res.render(ThumbnailResBody {
content: FileOrLocation::File(content),
metadata: ContentMetadata::new(),
});
Ok(())
} else if let Ok(Some(DbMetadata {
disposition_type: _,
content_type,
..
})) = crate::data::media::get_metadata(server_name, &args.media_id)
{
// Generate a thumbnail
let image_path = get_media_path(server_name, &args.media_id);
if let Ok(image) = image::open(&image_path) {
let original_width = image.width();
let original_height = image.height();
if width > original_width || height > original_height {
let content_disposition = make_content_disposition(
Some(ContentDispositionType::Inline),
content_type.as_deref(),
None,
);
let content = Content {
file: fs::read(&image_path)?,
content_type,
content_disposition: Some(content_disposition),
};
res.render(ThumbnailResBody {
content: FileOrLocation::File(content),
metadata: ContentMetadata::new(),
});
return Ok(());
}
let thumbnail = if crop {
image.resize_to_fill(width, height, FilterType::CatmullRom)
} else {
let (exact_width, exact_height) = {
// Copied from image::dynimage::resize_dimensions
let ratio = u64::from(original_width) * u64::from(height);
let nratio = u64::from(width) * u64::from(original_height);
let use_width = nratio <= ratio;
let intermediate = if use_width {
u64::from(original_height) * u64::from(width) / u64::from(original_width)
} else {
u64::from(original_width) * u64::from(height) / u64::from(original_height)
};
if use_width {
if intermediate <= u64::from(u32::MAX) {
(width, intermediate as u32)
} else {
(
(u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
u32::MAX,
)
}
} else if intermediate <= u64::from(u32::MAX) {
(intermediate as u32, height)
} else {
(
u32::MAX,
(u64::from(height) * u64::from(u32::MAX) / intermediate) as u32,
)
}
};
image.thumbnail_exact(exact_width, exact_height)
};
let mut thumbnail_bytes = Vec::new();
thumbnail.write_to(
&mut Cursor::new(&mut thumbnail_bytes),
image::ImageFormat::Png,
)?;
// Save thumbnail in database so we don't have to generate it again next time
diesel::insert_into(media_thumbnails::table)
.values(&NewDbThumbnail {
media_id: args.media_id.clone(),
origin_server: server_name.to_owned(),
content_type: Some("mage/png".to_owned()),
disposition_type: None,
file_size: thumbnail_bytes.len() as i64,
width: width as i32,
height: height as i32,
resize_method: args.method.clone().unwrap_or_default().to_string(),
created_at: UnixMillis::now(),
})
.execute(&mut connect()?)?;
let mut f = File::create(&thumb_path).await?;
f.write_all(&thumbnail_bytes).await?;
let content_disposition = make_content_disposition(
Some(ContentDispositionType::Inline),
content_type.as_deref(),
None,
);
let content = Content {
file: thumbnail_bytes,
content_type,
content_disposition: Some(content_disposition),
};
res.render(ThumbnailResBody {
content: FileOrLocation::File(content),
metadata: ContentMetadata::new(),
});
Ok(())
} else {
let content_disposition = make_content_disposition(None, content_type.as_deref(), None);
let content = Content {
file: fs::read(&image_path)?,
content_type,
content_disposition: Some(content_disposition),
};
res.render(ThumbnailResBody {
content: FileOrLocation::File(content),
metadata: ContentMetadata::new(),
});
Ok(())
}
} else {
Err(MatrixError::not_found("file not found").into())
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/backfill.rs | crates/server/src/routing/federation/backfill.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use indexmap::IndexMap;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::federation::backfill::{BackfillReqArgs, BackfillResBody};
use crate::data::{connect, schema::*};
use crate::room::{state, timeline};
use crate::routing::prelude::*;
/// Registers the federation backfill endpoint under `backfill/{room_id}`.
pub fn router() -> Router {
    let backfill = Router::with_path("backfill/{room_id}");
    backfill.get(get_backfill)
}
/// #GET /_matrix/federation/v1/backfill/{room_id}
/// Retrieves events from before the sender joined the room, if the room's
/// history visibility allows.
#[endpoint]
async fn get_backfill(
    _aa: AuthArgs,
    args: BackfillReqArgs,
    depot: &mut Depot,
) -> JsonResult<BackfillResBody> {
    let origin = depot.origin()?;
    debug!("got backfill request from: {}", origin);

    let seeds = events::table
        .filter(events::id.eq_any(&args.v))
        .select((events::id, events::depth))
        .load::<(OwnedEventId, i64)>(&mut connect()?)?;

    // Walk backwards through the DAG, deepest events first. The queue maps
    // depth -> all event ids at that depth; the previous `BTreeMap<i64,
    // OwnedEventId>` silently dropped sibling events sharing a depth because
    // `insert` overwrites the existing value.
    let mut queue: BTreeMap<i64, Vec<OwnedEventId>> = BTreeMap::new();
    for (seed_id, seed_depth) in seeds {
        queue.entry(seed_depth).or_default().push(seed_id);
    }

    let limit = args.limit;
    let mut events = IndexMap::with_capacity(limit);
    'outer: while events.len() < limit {
        // Pop the deepest remaining batch; stop when the queue is exhausted.
        let Some((_depth, event_ids)) = queue.pop_last() else {
            break;
        };
        for event_id in event_ids {
            if events.len() >= limit {
                break 'outer;
            }
            let mut prev_ids = event_edges::table
                .filter(event_edges::event_id.eq(&event_id))
                .select(event_edges::prev_id)
                .load::<OwnedEventId>(&mut connect()?)?;
            // Don't requeue predecessors we have already emitted.
            prev_ids.retain(|p| !events.contains_key(p));
            if !events.contains_key(&event_id) {
                // Only include events the requesting server is allowed to see.
                if let Ok((pdu, data)) = timeline::get_pdu_and_data(&event_id)
                    && state::server_can_see_event(origin, &args.room_id, &pdu.event_id)?
                {
                    events.insert(
                        event_id.clone(),
                        crate::sending::convert_to_outgoing_federation_event(data),
                    );
                }
            }
            let prevs = events::table
                .filter(events::id.eq_any(&prev_ids))
                .select((events::id, events::depth))
                .load::<(OwnedEventId, i64)>(&mut connect()?)?;
            for (prev_id, prev_depth) in prevs {
                queue.entry(prev_depth).or_default().push(prev_id);
            }
        }
    }

    json_ok(BackfillResBody {
        origin: config::get().server_name.to_owned(),
        origin_server_ts: UnixMillis::now(),
        pdus: events.into_values().collect(),
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/membership.rs | crates/server/src/routing/federation/membership.rs | use diesel::prelude::*;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde_json::json;
use serde_json::value::to_raw_value;
use crate::core::UnixMillis;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::events::{StateEventType, TimelineEventType};
use crate::core::federation::membership::*;
use crate::core::identifiers::*;
use crate::core::room::{JoinRule, RoomEventReqArgs};
use crate::core::serde::{CanonicalJsonObject, CanonicalJsonValue, to_canonical_object};
use crate::data::connect;
use crate::data::room::NewDbEvent;
use crate::data::schema::*;
use crate::event::handler;
use crate::federation::maybe_strip_event_id;
use crate::room::{ensure_room, timeline};
use crate::{
DepotExt, EmptyResult, IsRemoteOrLocal, JsonResult, MatrixError, PduBuilder, SnPduEvent,
config, data, empty_ok, json_ok, membership, room,
};
/// Routes for the v1 federation membership endpoints.
pub fn router_v1() -> Router {
    let routes = [
        Router::with_path("make_join/{room_id}/{user_id}").get(make_join),
        Router::with_path("invite/{room_id}/{event_id}").put(invite_user),
        Router::with_path("make_leave/{room_id}/{user_id}").get(make_leave),
        Router::with_path("send_join/{room_id}/{event_id}").put(send_join_v1),
        Router::with_path("send_leave/{room_id}/{event_id}").put(send_leave),
    ];
    routes
        .into_iter()
        .fold(Router::new(), |parent, child| parent.push(child))
}
/// Routes for the v2 federation membership endpoints (v2 `send_join`
/// response format; the other routes are shared with v1).
pub fn router_v2() -> Router {
    let routes = [
        Router::with_path("make_join/{room_id}/{user_id}").get(make_join),
        Router::with_path("invite/{room_id}/{event_id}").put(invite_user),
        Router::with_path("make_leave/{room_id}/{user_id}").get(make_leave),
        Router::with_path("send_join/{room_id}/{event_id}").put(send_join_v2),
        Router::with_path("send_leave/{room_id}/{event_id}").put(send_leave),
    ];
    routes
        .into_iter()
        .fold(Router::new(), |parent, child| parent.push(child))
}
/// #GET /_matrix/federation/v1/make_join/{room_id}/{user_id}
/// Creates a join template.
///
/// Returns an unsigned membership-event prototype that the requesting server
/// signs and submits back via `send_join`.
#[endpoint]
async fn make_join(args: MakeJoinReqArgs, depot: &mut Depot) -> JsonResult<MakeJoinResBody> {
    if !room::room_exists(&args.room_id)? {
        return Err(MatrixError::not_found("Room is unknown to this server.").into());
    }
    let origin = depot.origin()?;
    // A server may only request join templates for its own users.
    if args.user_id.server_name() != origin {
        return Err(
            MatrixError::bad_json("Not allowed to join on behalf of another server/user.").into(),
        );
    }
    handler::acl_check(args.user_id.server_name(), &args.room_id)?;
    let room_version_id = room::get_version(&args.room_id)?;
    // The joining server must advertise support for this room's version.
    if !args.ver.contains(&room_version_id) {
        return Err(MatrixError::incompatible_room_version(
            "Room version not supported.",
            room_version_id,
        )
        .into());
    }
    // Hold the room state lock while building the template so it reflects a
    // consistent state snapshot.
    let state_lock = crate::room::lock_state(&args.room_id).await;

    if args.user_id.is_remote()
        && args.room_id.is_remote()
        && !room::is_server_joined(&config::get().server_name, &args.room_id)?
    {
        return Err(MatrixError::bad_json("Not allowed to join on unkonwn remote server.").into());
    }

    // For restricted rooms (room versions V8+), find a local member who can
    // vouch for the join; older room versions have no restricted join rules.
    let join_authorized_via_users_server: Option<OwnedUserId> = {
        use RoomVersionId::*;
        if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
            // room version does not support restricted join rules
            None
        } else {
            let join_rule = room::get_join_rule(&args.room_id)?;
            let guest_can_join = room::guest_can_join(&args.room_id);
            if join_rule == JoinRule::Public || guest_can_join {
                None
            } else if crate::federation::user_can_perform_restricted_join(
                &args.user_id,
                &args.room_id,
                &room_version_id,
                Some(&join_rule),
            )
            .await?
            {
                membership::get_first_user_can_issue_invite(
                    &args.room_id,
                    &args.user_id,
                    &join_rule.restriction_rooms(),
                )
                .await
                .ok()
            } else {
                return Err(MatrixError::unable_to_grant_join(
                    "no user on this server is able to assist in joining",
                )
                .into());
            }
        }
    };

    let content = to_raw_value(&RoomMemberEventContent {
        avatar_url: None,
        blurhash: None,
        display_name: None,
        is_direct: None,
        membership: MembershipState::Join,
        third_party_invite: None,
        reason: None,
        join_authorized_via_users_server,
        extra_data: Default::default(),
    })
    .expect("member event is valid value");

    let (_pdu, mut pdu_json) = PduBuilder {
        event_type: TimelineEventType::RoomMember,
        content,
        state_key: Some(args.user_id.to_string()),
        ..Default::default()
    }
    .hash_sign(&args.user_id, &args.room_id, &room_version_id)
    .await?;
    drop(state_lock);

    // Room v3 and above removed the "event_id" field from the remote PDU
    // format, so strip it where required.
    maybe_strip_event_id(&mut pdu_json, &room_version_id);

    let body = MakeJoinResBody {
        room_version: Some(room_version_id),
        event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"),
    };
    json_ok(body)
}
/// #PUT /_matrix/federation/v2/invite/{room_id}/{event_id}
/// Invites a remote user to a room.
///
/// The remote server sends us a membership event inviting one of our local
/// users; we validate it, co-sign it, record the invite locally, and return
/// the signed event.
#[endpoint]
async fn invite_user(
    args: RoomEventReqArgs,
    body: JsonBody<InviteUserReqBodyV2>,
    depot: &mut Depot,
) -> JsonResult<InviteUserResBodyV2> {
    let body = body.into_inner();
    let origin = depot.origin()?;
    let conf = config::get();
    handler::acl_check(origin, &args.room_id)?;
    if !config::supported_room_versions().contains(&body.room_version) {
        return Err(MatrixError::incompatible_room_version(
            "server does not support this room version",
            body.room_version.clone(),
        )
        .into());
    }
    let mut signed_event = to_canonical_object(&body.event)
        .map_err(|_| MatrixError::invalid_param("invite event is invalid"))?;
    // The invited user is carried in the membership event's state_key; it
    // must resolve to a user on this server.
    let invitee_id: OwnedUserId = serde_json::from_value(
        signed_event
            .get("state_key")
            .ok_or(MatrixError::invalid_param("event had no state_key field"))?
            .clone()
            .into(),
    )
    .map_err(|_| MatrixError::invalid_param("state_key is not a user id"))?;

    if invitee_id.server_name().is_remote() {
        return Err(MatrixError::invalid_param("cannot invite remote users").into());
    }
    let invitee = data::user::get_user(&invitee_id)
        .map_err(|_| MatrixError::not_found("invitee user not found"))?;

    handler::acl_check(invitee_id.server_name(), &args.room_id)?;

    // Add our server's signature to the event before storing/returning it.
    crate::server_key::hash_and_sign_event(&mut signed_event, &body.room_version)
        .map_err(|e| MatrixError::invalid_param(format!("failed to sign event: {e}")))?;

    // Generate event id
    let event_id = crate::event::gen_event_id(&signed_event, &body.room_version)?;

    // Add event_id back
    signed_event.insert(
        "event_id".to_owned(),
        CanonicalJsonValue::String(event_id.to_string()),
    );

    let state_lock = room::lock_state(&args.room_id).await;
    ensure_room(&args.room_id, &body.room_version)?;
    if data::room::is_banned(&args.room_id)? {
        return Err(MatrixError::forbidden("this room is banned on this homeserver", None).into());
    }
    if conf.block_non_admin_invites && !invitee.is_admin {
        return Err(MatrixError::forbidden("this server does not allow room invites", None).into());
    }

    let mut invite_state = body.invite_room_state.clone();

    // If we are active in the room, the remote server will notify us about the join via /send.
    // If we are not in the room, we need to manually
    // record the invited state for client /sync through update_membership(), and
    // send the invite PDU to the relevant appservices.
    // if !room::is_server_joined(&config::get().server_name, &args.room_id)? {
    let mut event: CanonicalJsonObject = serde_json::from_str(body.event.get())
        .map_err(|_| MatrixError::invalid_param("invalid invite event bytes"))?;

    // let event_id: OwnedEventId = format!("$dummy_{}", Ulid::new().to_string()).try_into()?;
    event.insert("event_id".to_owned(), event_id.to_string().into());

    // Reserve a local sequence number for the event; the guard keeps it
    // reserved until we are done persisting.
    let (event_sn, event_guard) = crate::event::ensure_event_sn(&args.room_id, &event_id)?;
    let pdu = SnPduEvent::from_canonical_object(
        &args.room_id,
        &event_id,
        event_sn,
        event.clone(),
        false,
        false,
        false,
    )
    .map_err(|e| {
        warn!("invalid invite event: {}", e);
        MatrixError::invalid_param("invalid invite event")
    })?;
    invite_state.push(pdu.to_stripped_state_event());

    NewDbEvent {
        id: pdu.event_id.to_owned(),
        sn: pdu.event_sn,
        ty: pdu.event_ty.to_string(),
        room_id: pdu.room_id.to_owned(),
        unrecognized_keys: None,
        depth: pdu.depth as i64,
        topological_ordering: pdu.depth as i64,
        stream_ordering: pdu.event_sn,
        origin_server_ts: UnixMillis::now(),
        received_at: None,
        sender_id: Some(pdu.sender.clone()),
        contains_url: false,
        worker_id: None,
        state_key: pdu.state_key.clone(),
        is_outlier: false,
        soft_failed: false,
        is_rejected: false,
        rejection_reason: None,
    }
    .save()?;
    timeline::append_pdu(&pdu, event, &state_lock).await?;

    // let sender_id: OwnedUserId = serde_json::from_value(
    //     signed_event
    //         .get("sender")
    //         .ok_or(MatrixError::invalid_param("event had no sender field"))?
    //         .clone()
    //         .into(),
    // )
    // .map_err(|_| MatrixError::invalid_param("sender is not a user id"))?;
    // Attach the stripped invite state to the invitee's pending membership
    // row so clients see it in /sync; best-effort (errors are ignored).
    diesel::update(
        room_users::table.filter(
            room_users::room_id
                .eq(&args.room_id)
                .and(room_users::user_id.eq(&invitee_id))
                .and(room_users::membership.eq(MembershipState::Invite.to_string())),
        ),
    )
    .set(room_users::state_data.eq(json!(invite_state)))
    .execute(&mut connect()?)
    .ok();
    drop(event_guard);
    // }
    drop(state_lock);

    json_ok(InviteUserResBodyV2 {
        event: crate::sending::convert_to_outgoing_federation_event(signed_event),
    })
}
/// # `GET /_matrix/federation/v1/make_leave/{roomId}/{userId}`
///
/// Creates a leave-event template the requesting server can sign and submit
/// back via `send_leave`.
#[endpoint]
async fn make_leave(args: MakeLeaveReqArgs, depot: &mut Depot) -> JsonResult<MakeLeaveResBody> {
    let origin = depot.origin()?;

    // A server may only request leave templates for its own users.
    if origin != args.user_id.server_name() {
        return Err(
            MatrixError::bad_json("not allowed to leave on behalf of another server").into(),
        );
    }
    if !room::is_room_exists(&args.room_id)? {
        return Err(MatrixError::forbidden("room is unknown to this server", None).into());
    }

    // ACL check origin
    handler::acl_check(origin, &args.room_id)?;

    let room_version_id = room::get_version(&args.room_id)?;
    let leave_content = RoomMemberEventContent::new(MembershipState::Leave);

    // Build and sign the template while holding the room state lock so it is
    // based on a consistent state snapshot.
    let (_pdu, mut event_json) = {
        let _state_lock = crate::room::lock_state(&args.room_id).await;
        PduBuilder::state(args.user_id.to_string(), &leave_content)
            .hash_sign(&args.user_id, &args.room_id, &room_version_id)
            .await?
    };

    // room v3 and above removed the "event_id" field from remote PDU format
    maybe_strip_event_id(&mut event_json, &room_version_id);

    json_ok(MakeLeaveResBody {
        room_version: Some(room_version_id),
        event: to_raw_value(&event_json).expect("canonicalJson can be serialized to JSON"),
    })
}
/// #PUT /_matrix/federation/v2/send_join/{room_id}/{event_id}
/// Invites a remote user to a room.
#[endpoint]
async fn send_join_v2(
depot: &mut Depot,
args: RoomEventReqArgs,
body: JsonBody<SendJoinReqBody>,
) -> JsonResult<SendJoinResBodyV2> {
let body = body.into_inner();
// let server_name = args.room_id.server_name().map_err(AppError::public)?;
// handler::acl_check(&server_name, &args.room_id)?;
let room_state =
crate::federation::membership::send_join_v2(depot.origin()?, &args.room_id, &body.0)
.await?;
json_ok(SendJoinResBodyV2(room_state))
}
/// #PUT /_matrix/federation/v1/send_join/{room_id}/{event_id}
/// Submits a signed join event.
#[endpoint]
async fn send_join_v1(
depot: &mut Depot,
args: RoomEventReqArgs,
body: JsonBody<SendJoinReqBody>,
) -> JsonResult<SendJoinResBodyV1> {
let body = body.into_inner();
let room_state =
crate::federation::membership::send_join_v1(depot.origin()?, &args.room_id, &body.0)
.await?;
json_ok(SendJoinResBodyV1(room_state))
}
/// #PUT /_matrix/federation/v2/send_leave/{roomId}/{eventId}
///
/// Submits a signed leave event.
///
/// Validates that the submitted PDU is a well-formed `m.room.member` leave
/// event for a user belonging to the submitting server, then processes it as
/// an incoming federation PDU.
#[endpoint]
async fn send_leave(
    depot: &mut Depot,
    args: SendLeaveReqArgsV2,
    body: JsonBody<SendLeaveReqBody>,
) -> EmptyResult {
    let origin = depot.origin()?;
    let body = body.into_inner();

    if !room::is_room_exists(&args.room_id)? {
        return Err(MatrixError::forbidden("Room is unknown to this server.", None).into());
    }

    handler::acl_check(origin, &args.room_id)?;

    // We do not add the event_id field to the pdu here because of signature and hashes checks
    let room_version_id = room::get_version(&args.room_id)?;
    let Ok((event_id, value)) =
        crate::event::gen_event_id_canonical_json(&body.0, &room_version_id)
    else {
        // Event could not be converted to canonical json
        return Err(
            MatrixError::invalid_param("Could not convert event to canonical json.").into(),
        );
    };

    // The room_id embedded in the event must match the request path.
    let event_room_id: OwnedRoomId = serde_json::from_value(
        serde_json::to_value(
            value
                .get("room_id")
                .ok_or_else(|| MatrixError::bad_json("Event missing room_id property."))?,
        )
        .expect("CanonicalJson is valid json value"),
    )
    .map_err(|e| MatrixError::bad_json(format!("room_id field is not a valid room ID: {e}")))?;

    if event_room_id != args.room_id {
        return Err(
            MatrixError::bad_json("Event room_id does not match request path room ID.").into(),
        );
    }

    // Only `membership: leave` events are accepted on this endpoint.
    let content: RoomMemberEventContent = serde_json::from_value(
        value
            .get("content")
            .ok_or_else(|| MatrixError::bad_json("Event missing content property"))?
            .clone()
            .into(),
    )
    .map_err(|_| MatrixError::bad_json("Event content is empty or invalid"))?;

    if content.membership != MembershipState::Leave {
        return Err(MatrixError::bad_json(
            "Not allowed to send a non-leave membership event to leave endpoint.",
        )
        .into());
    }

    let event_type: StateEventType = serde_json::from_value(
        value
            .get("type")
            .ok_or_else(|| MatrixError::bad_json("Event missing type property."))?
            .clone()
            .into(),
    )
    .map_err(|_| MatrixError::bad_json("Event does not have a valid state event type."))?;

    if event_type != StateEventType::RoomMember {
        return Err(MatrixError::invalid_param(
            "Not allowed to send non-membership state event to leave endpoint.",
        )
        .into());
    }

    // ACL check sender server name
    let sender: OwnedUserId = serde_json::from_value(
        value
            .get("sender")
            .ok_or_else(|| MatrixError::bad_json("event missing sender property"))?
            .clone()
            .into(),
    )
    .map_err(|_| MatrixError::bad_json("user in sender is invalid"))?;

    handler::acl_check(sender.server_name(), &args.room_id)?;

    // The origin server may only submit leaves for its own users.
    if sender.server_name() != origin {
        return Err(
            MatrixError::bad_json("not allowed to leave on behalf of another server.").into(),
        );
    }

    // Membership events must have state_key equal to the sender.
    let state_key: OwnedUserId = serde_json::from_value(
        value
            .get("state_key")
            .ok_or_else(|| MatrixError::invalid_param("event missing state_key property"))?
            .clone()
            .into(),
    )
    .map_err(|_| MatrixError::bad_json("state_key is invalid or not a user id"))?;

    if state_key != sender {
        return Err(MatrixError::bad_json("state_key does not match sender user").into());
    }

    handler::process_incoming_pdu(
        origin,
        &event_id,
        &args.room_id,
        &room_version_id,
        value,
        true,
        false,
    )
    .await?;

    // Fan the leave out to the rest of the room; failure here is non-fatal.
    if let Err(e) = crate::sending::send_pdu_room(&args.room_id, &event_id, &[], &[]) {
        error!("failed to notify leave event: {e}");
    }
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/query.rs | crates/server/src/routing/federation/query.rs | //! Endpoints to retrieve information from a homeserver about a resource.
use palpo_core::federation::query::ProfileReqArgs;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::federation::query::RoomInfoResBody;
use crate::core::identifiers::*;
use crate::core::user::{ProfileField, ProfileResBody};
use crate::{
AuthArgs, EmptyResult, IsRemoteOrLocal, JsonResult, MatrixError, config, data, empty_ok,
json_ok,
};
/// Routes for `/_matrix/federation/v1/query/*` lookups. The `{query_type}`
/// route is a catch-all registered last so the named routes win.
pub fn router() -> Router {
    let base = Router::with_path("query");
    base.push(Router::with_path("profile").get(get_profile))
        .push(Router::with_path("directory").get(get_directory))
        .push(Router::with_path("{query_type}").get(query_by_type))
}
/// #GET /_matrix/federation/v1/query/profile
/// Gets information on a profile.
#[endpoint]
async fn get_profile(_aa: AuthArgs, args: ProfileReqArgs) -> JsonResult<ProfileResBody> {
    // Only local users have profiles on this server.
    if args.user_id.server_name().is_remote() {
        return Err(MatrixError::invalid_param("User does not belong to this server.").into());
    }

    let profile = data::user::get_profile(&args.user_id, None)?
        .ok_or(MatrixError::not_found("Profile not found."))?;

    // Return only the requested field, or every field when none was given.
    let (display_name, avatar_url, blurhash) = match &args.field {
        Some(ProfileField::DisplayName) => (profile.display_name.clone(), None, None),
        Some(ProfileField::AvatarUrl) => (
            None,
            profile.avatar_url.clone(),
            profile.blurhash.clone(),
        ),
        // TODO: what to do with custom fields — currently nothing is returned.
        Some(_) => (None, None, None),
        None => (
            profile.display_name.clone(),
            profile.avatar_url.clone(),
            profile.blurhash.clone(),
        ),
    };

    json_ok(ProfileResBody {
        blurhash,
        display_name,
        avatar_url,
    })
}
/// #GET /_matrix/federation/v1/query/directory
/// Resolve a room alias to a room id and the servers that can be used to
/// join through.
#[endpoint]
async fn get_directory(
    _aa: AuthArgs,
    room_alias: QueryParam<OwnedRoomAliasId, true>,
) -> JsonResult<RoomInfoResBody> {
    let room_id = crate::room::resolve_local_alias(&room_alias)?;
    let own_server = config::get().server_name.to_owned();
    let mut servers = crate::room::lookup_servers(&room_id)?;
    // Put our own server first. `Vec::dedup` only removes *consecutive*
    // duplicates, so remove any existing occurrence of our server before
    // prepending instead of relying on dedup to catch it.
    servers.retain(|server| *server != own_server);
    servers.dedup();
    servers.insert(0, own_server);
    json_ok(RoomInfoResBody { room_id, servers })
}
/// Catch-all for `GET /_matrix/federation/v1/query/{query_type}`.
///
/// Unrecognized query types are currently acknowledged with an empty body.
#[endpoint]
async fn query_by_type(_aa: AuthArgs) -> EmptyResult {
    // TODO: todo
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/openid.rs | crates/server/src/routing/federation/openid.rs | use salvo::prelude::*;
use crate::core::federation::openid::{UserInfoReqArgs, UserInfoResBody};
use crate::{AuthArgs, JsonResult, json_ok};
/// Registers the federation OpenID userinfo endpoint.
pub fn router() -> Router {
    let userinfo = Router::with_path("openid/userinfo");
    userinfo.get(user_info)
}
/// #GET /_matrix/federation/v1/openid/userinfo
/// Exchanges an OpenID access token for the Matrix user id it belongs to.
#[endpoint]
async fn user_info(_aa: AuthArgs, args: UserInfoReqArgs) -> JsonResult<UserInfoResBody> {
    let user_id = crate::user::find_from_openid_token(&args.access_token).await?;
    let body = UserInfoResBody::new(user_id);
    json_ok(body)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/space.rs | crates/server/src/routing/federation/space.rs | use futures_util::{FutureExt, StreamExt, stream};
use salvo::prelude::*;
use crate::core::MatrixError;
use crate::core::federation::space::{HierarchyReqArgs, HierarchyResBody};
use crate::room::space::{Identifier, SummaryAccessibility, get_parent_children_via};
use crate::{AuthArgs, DepotExt, JsonResult, json_ok};
/// Registers the federation space-hierarchy endpoint.
pub fn router() -> Router {
    let hierarchy = Router::with_path("hierarchy/{room_id}");
    hierarchy.get(get_hierarchy)
}
/// # `GET /_matrix/federation/v1/hierarchy/{room_id}`
///
/// Gets the space tree in a depth-first manner to locate child rooms of a given
/// space.
#[endpoint]
async fn get_hierarchy(
    _aa: AuthArgs,
    args: HierarchyReqArgs,
    depot: &mut Depot,
) -> JsonResult<HierarchyResBody> {
    if !crate::room::room_exists(&args.room_id)? {
        return Err(MatrixError::not_found("Room does not exist.").into());
    }
    let origin = depot.origin()?;
    let room_id = &args.room_id;
    let suggested_only = args.suggested_only;
    // Room visibility is evaluated from the requesting server's perspective.
    let identifier = &Identifier::ServerName(origin);

    match crate::room::space::get_summary_and_children_local(room_id, identifier, suggested_only)
        .await?
    {
        None => Err(MatrixError::not_found("The requested room was not found").into()),

        Some(SummaryAccessibility::Inaccessible) => {
            Err(MatrixError::not_found("The requested room is inaccessible").into())
        }

        Some(SummaryAccessibility::Accessible(room)) => {
            // Resolve each direct child of the space. Children the origin may
            // not see are collected separately as `inaccessible_children`;
            // unknown children (lookup returned None or errored) are dropped.
            let children_via = get_parent_children_via(&room, suggested_only);
            let (children, inaccessible_children) = stream::iter(children_via)
                .filter_map(|(child, _via)| async move {
                    match crate::room::space::get_summary_and_children_local(
                        &child,
                        identifier,
                        suggested_only,
                    )
                    .await
                    .ok()?
                    {
                        None => None,
                        Some(SummaryAccessibility::Inaccessible) => Some((None, Some(child))),
                        Some(SummaryAccessibility::Accessible(summary)) => {
                            Some((Some(summary), None))
                        }
                    }
                })
                // Split the (summary, inaccessible) pairs into two vectors and
                // drop the `None` placeholders from each side.
                .unzip()
                .map(|(children, inaccessible_children): (Vec<_>, Vec<_>)| {
                    (
                        children.into_iter().flatten().map(Into::into).collect(),
                        inaccessible_children.into_iter().flatten().collect(),
                    )
                })
                .await;

            json_ok(HierarchyResBody {
                room,
                children,
                inaccessible_children,
            })
        }
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/threepid.rs | crates/server/src/routing/federation/threepid.rs | use salvo::prelude::*;
use crate::{AuthArgs, EmptyResult, empty_ok};
/// Registers the federation third-party-identifier on-bind callback route.
pub fn router() -> Router {
    let bind = Router::with_path("3pid/onbind");
    bind.put(on_bind)
}
/// Handles `PUT /_matrix/federation/v1/3pid/onbind`.
///
/// Called when a third-party identifier is bound to a Matrix user.
/// Currently a no-op stub that acknowledges the request.
#[endpoint]
async fn on_bind(_aa: AuthArgs) -> EmptyResult {
    // TODO: todo
    empty_ok()
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/room.rs | crates/server/src/routing/federation/room.rs | use salvo::oapi::extract::*;
use salvo::prelude::*;
use serde_json::value::to_raw_value;
use crate::core::client::directory::{PublicRoomsFilteredReqBody, PublicRoomsReqArgs};
use crate::core::directory::{PublicRoomFilter, PublicRoomsResBody, RoomNetwork};
use crate::core::events::StateEventType;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::federation::event::{
RoomStateAtEventReqArgs, RoomStateIdsResBody, RoomStateReqArgs, RoomStateResBody,
};
use crate::core::federation::knock::{
MakeKnockReqArgs, MakeKnockResBody, SendKnockReqArgs, SendKnockReqBody, SendKnockResBody,
};
use crate::core::identifiers::*;
use crate::core::serde::JsonObject;
use crate::event::gen_event_id_canonical_json;
use crate::event::handler;
use crate::room::{state, timeline};
use crate::{
AuthArgs, DepotExt, IsRemoteOrLocal, JsonResult, MatrixError, PduBuilder, PduEvent, data,
json_ok, room, sending,
};
/// Routes for miscellaneous federation room endpoints: state retrieval,
/// the public room directory, and knocking.
pub fn router() -> Router {
    let public_rooms = Router::with_path("publicRooms")
        .get(get_public_rooms)
        .post(get_filtered_public_rooms);
    Router::new()
        .push(Router::with_path("state/{room_id}").get(get_state))
        .push(public_rooms)
        .push(Router::with_path("send_knock/{room_id}/{event_id}").put(send_knock))
        .push(Router::with_path("make_knock/{room_id}/{user_id}").get(make_knock))
        .push(Router::with_path("state_ids/{room_id}").get(get_state_at_event))
}
/// #GET /_matrix/federation/v1/state/{room_id}
/// Retrieves the current state of the room.
#[endpoint]
async fn get_state(
_aa: AuthArgs,
args: RoomStateReqArgs,
depot: &mut Depot,
) -> JsonResult<RoomStateResBody> {
let origin = depot.origin()?;
crate::federation::access_check(origin, &args.room_id, None)?;
let state_hash = state::get_pdu_frame_id(&args.event_id)?;
let pdus = state::get_full_state_ids(state_hash)?
.into_values()
.map(|id| {
sending::convert_to_outgoing_federation_event(
timeline::get_pdu_json(&id).unwrap().unwrap(),
)
})
.collect();
let auth_chain_ids =
room::auth_chain::get_auth_chain_ids(&args.room_id, [&*args.event_id].into_iter())?;
json_ok(RoomStateResBody {
auth_chain: auth_chain_ids
.into_iter()
.filter_map(|id| match timeline::get_pdu_json(&id).ok()? {
Some(json) => Some(crate::sending::convert_to_outgoing_federation_event(json)),
None => {
error!("Could not find event json for {id} in db::");
None
}
})
.collect(),
pdus,
})
}
/// #GET /_matrix/federation/v1/publicRooms
/// Lists the public rooms on this server.
#[endpoint]
async fn get_public_rooms(
    _aa: AuthArgs,
    args: PublicRoomsReqArgs,
) -> JsonResult<PublicRoomsResBody> {
    // Federation callers get the unfiltered directory over the Matrix network.
    let filter = PublicRoomFilter::default();
    let network = RoomNetwork::Matrix;
    let body = crate::directory::get_public_rooms(
        None,
        args.limit,
        args.since.as_deref(),
        &filter,
        &network,
    )
    .await?;
    json_ok(body)
}
/// #POST /_matrix/federation/v1/publicRooms
/// Lists the public rooms on this server, applying the caller's filter and
/// network selection.
#[endpoint]
async fn get_filtered_public_rooms(
    _aa: AuthArgs,
    args: JsonBody<PublicRoomsFilteredReqBody>,
) -> JsonResult<PublicRoomsResBody> {
    let server = args.server.as_deref();
    let since = args.since.as_deref();
    let resp = crate::directory::get_public_rooms(
        server,
        args.limit,
        since,
        &args.filter,
        &args.room_network,
    )
    .await?;
    json_ok(resp)
}
/// # `PUT /_matrix/federation/v1/send_knock/{roomId}/{eventId}`
///
/// Submits a signed knock event.
#[endpoint]
async fn send_knock(
_aa: AuthArgs,
args: SendKnockReqArgs,
_req: &mut Request,
body: JsonBody<SendKnockReqBody>,
depot: &mut Depot,
) -> JsonResult<SendKnockResBody> {
use crate::core::RoomVersionId::*;
let origin = depot.origin()?;
let body: SendKnockReqBody = body.into_inner();
if args.room_id.is_remote() {
return Err(MatrixError::not_found("room is unknown to this server").into());
}
// ACL check origin server
handler::acl_check(origin, &args.room_id)?;
let room_version = crate::room::get_version(&args.room_id)?;
if matches!(room_version, V1 | V2 | V3 | V4 | V5 | V6) {
return Err(MatrixError::forbidden("room version does not support knocking", None).into());
}
let Ok((event_id, value)) = gen_event_id_canonical_json(&body.0, &room_version) else {
// Event could not be converted to canonical json
return Err(MatrixError::invalid_param("could not convert event to canonical json").into());
};
let event_type: StateEventType = serde_json::from_value(
value
.get("type")
.ok_or_else(|| MatrixError::invalid_param("event has no event type"))?
.clone()
.into(),
)
.map_err(|e| MatrixError::invalid_param(format!("event has invalid event type: {e}")))?;
if event_type != StateEventType::RoomMember {
return Err(MatrixError::invalid_param(
"not allowed to send non-membership state event to knock endpoint",
)
.into());
}
let content: RoomMemberEventContent = serde_json::from_value(
value
.get("content")
.ok_or_else(|| MatrixError::invalid_param("membership event has no content"))?
.clone()
.into(),
)
.map_err(|e| {
MatrixError::invalid_param(format!("event has invalid membership content: {e}"))
})?;
if content.membership != MembershipState::Knock {
return Err(MatrixError::invalid_param(
"not allowed to send a non-knock membership event to knock endpoint",
)
.into());
}
// ACL check sender server name
let sender: OwnedUserId = serde_json::from_value(
value
.get("sender")
.ok_or_else(|| MatrixError::invalid_param("event has no sender user id"))?
.clone()
.into(),
)
.map_err(|e| MatrixError::invalid_param(format!("event sender is not a valid user id: {e}")))?;
handler::acl_check(sender.server_name(), &args.room_id)?;
// check if origin server is trying to send for another server
if sender.server_name() != origin {
return Err(MatrixError::bad_json(
"Not allowed to knock on behalf of another server/user.",
)
.into());
}
let state_key: OwnedUserId = serde_json::from_value(
value
.get("state_key")
.ok_or_else(|| MatrixError::invalid_param("event does not have a state_key"))?
.clone()
.into(),
)
.map_err(|e| MatrixError::bad_json(format!("event does not have a valid state_key: {e}")))?;
if state_key != sender {
return Err(
MatrixError::invalid_param("state_key does not match sender user of event.").into(),
);
};
let origin: OwnedServerName = serde_json::from_value(
value
.get("origin")
.ok_or_else(|| MatrixError::bad_json("event does not have an origin server name"))?
.clone()
.into(),
)
.map_err(|e| MatrixError::bad_json(format!("event has an invalid origin server name: {e}")))?;
let event: JsonObject = serde_json::from_str(body.0.get())
.map_err(|e| MatrixError::invalid_param(format!("invalid knock event PDU: {e}")))?;
let pdu: PduEvent = PduEvent::from_json_value(&args.room_id, &event_id, event.into())
.map_err(|e| MatrixError::invalid_param(format!("invalid knock event pdu: {e}")))?;
handler::process_incoming_pdu(
&origin,
&event_id,
&args.room_id,
&room_version,
value.clone(),
true,
false,
)
.await
.map_err(|e| {
error!(
error = %e,
room_id = %args.room_id, "could not accept as timeline event {}", event_id
);
MatrixError::invalid_param(format!("could not accept as timeline event"))
})?;
data::room::add_joined_server(&args.room_id, &origin)?;
let knock_room_state = state::summary_stripped(&pdu)?;
if let Err(e) = crate::sending::send_pdu_room(&args.room_id, &event_id, &[], &[]) {
error!("failed to notify knock event: {e}");
}
json_ok(SendKnockResBody { knock_room_state })
}
/// # `GET /_matrix/federation/v1/make_knock/{room_id}/{user_id}`
///
/// Creates a knock template.
///
/// Builds an unsigned `m.room.member` (knock) event template that the
/// requesting server can sign and submit via the `send_knock` endpoint.
#[endpoint]
async fn make_knock(
    _aa: AuthArgs,
    args: MakeKnockReqArgs,
    depot: &mut Depot,
) -> JsonResult<MakeKnockResBody> {
    use crate::core::RoomVersionId::*;
    let origin = depot.origin()?;
    if !crate::room::room_exists(&args.room_id)? {
        return Err(MatrixError::not_found("room is unknown to this server").into());
    }
    // The knocking user must belong to the requesting server.
    if args.user_id.server_name() != origin {
        return Err(
            MatrixError::bad_json("not allowed to knock on behalf of another server/user").into(),
        );
    }
    // ACL check origin server
    handler::acl_check(origin, &args.room_id)?;
    // Knocking is only available in room versions later than v6.
    let room_version_id = crate::room::get_version(&args.room_id)?;
    if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) {
        return Err(MatrixError::incompatible_room_version(
            "room version does not support knocking",
            room_version_id,
        )
        .into());
    }
    // if !args.ver.contains(&room_version_id) {
    //     return Err(MatrixError::incompatible_room_version(
    //         room_version_id,
    //         "Your homeserver does not support the features required to knock on this room.",
    //     ));
    // }
    // Hold the room state lock while building the template so it is based on
    // a consistent view of the current state.
    let state_lock = room::lock_state(&args.room_id).await;
    // Banned users may not knock.
    if let Ok(member) = room::get_member(&args.room_id, &args.user_id, None)
        && member.membership == MembershipState::Ban
    {
        warn!(
            "remote user {} is banned from {} but attempted to knock",
            &args.user_id, &args.room_id
        );
        return Err(
            MatrixError::forbidden("you cannot knock on a room you are banned from", None).into(),
        );
    }
    let (_pdu, mut pdu_json) = PduBuilder::state(
        args.user_id.to_string(),
        &RoomMemberEventContent::new(MembershipState::Knock),
    )
    .hash_sign(&args.user_id, &args.room_id, &room_version_id)
    .await?;
    drop(state_lock);
    // room v3 and above removed the "event_id" field from remote PDU format
    crate::federation::maybe_strip_event_id(&mut pdu_json, &room_version_id);
    json_ok(MakeKnockResBody {
        room_version: room_version_id,
        event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to json"),
    })
}
/// #GET /_matrix/federation/v1/state_ids/{room_id}
/// Retrieves the current state of the room.
///
/// Like `get_state`, but returns only event ids (state set + auth chain).
#[endpoint]
fn get_state_at_event(
    depot: &mut Depot,
    args: RoomStateAtEventReqArgs,
) -> JsonResult<RoomStateIdsResBody> {
    let origin = depot.origin()?;
    crate::federation::access_check(origin, &args.room_id, Some(&args.event_id))?;

    // Ids of all state events at the requested event.
    let frame_id = state::get_pdu_frame_id(&args.event_id)?;
    let pdu_ids = state::get_full_state_ids(frame_id)?
        .into_values()
        .map(|event_id| (*event_id).to_owned())
        .collect();

    // Ids of the auth chain of the requested event.
    let auth_chain_ids = crate::room::auth_chain::get_auth_chain_ids(
        &args.room_id,
        [&*args.event_id].into_iter(),
    )?
    .into_iter()
    .map(|event_id| (*event_id).to_owned())
    .collect();

    json_ok(RoomStateIdsResBody {
        auth_chain_ids,
        pdu_ids,
    })
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/routing/federation/transaction.rs | crates/server/src/routing/federation/transaction.rs | use std::collections::BTreeMap;
use std::time::Instant;
use salvo::oapi::extract::*;
use salvo::prelude::*;
use crate::core::UnixMillis;
use crate::core::device::{DeviceListUpdateContent, DirectDeviceContent};
use crate::core::events::receipt::{
ReceiptContent, ReceiptEvent, ReceiptEventContent, ReceiptType,
};
use crate::core::events::typing::TypingContent;
use crate::core::federation::transaction::{
Edu, SendMessageReqBody, SendMessageResBody, SigningKeyUpdateContent,
};
use crate::core::identifiers::*;
use crate::core::presence::PresenceContent;
use crate::core::serde::RawJsonValue;
use crate::core::to_device::DeviceIdOrAllDevices;
use crate::data::user::NewDbPresence;
use crate::event::{handler, parse_incoming_pdu};
use crate::sending::{EDU_LIMIT, PDU_LIMIT};
use crate::{AppError, AppResult, DepotExt, JsonResult, MatrixError, data, json_ok, room};
/// Routes for the federation transaction API: `PUT /send/{txn_id}`.
pub fn router() -> Router {
    let send_route = Router::with_path("send/{txn_id}");
    send_route.put(send_message)
}
/// #PUT /_matrix/federation/v1/send/{txn_id}
/// Push EDUs and PDUs to this server.
///
/// Rejects transactions claiming another server's origin or exceeding the
/// PDU/EDU limits, then processes all PDUs followed by all EDUs and returns
/// the per-PDU results.
#[endpoint]
async fn send_message(
    depot: &mut Depot,
    _txn_id: PathParam<OwnedTransactionId>,
    body: JsonBody<SendMessageReqBody>,
) -> JsonResult<SendMessageResBody> {
    let origin = depot.origin()?;
    let body = body.into_inner();

    // The origin claimed inside the body must match the authenticated server.
    if &body.origin != origin {
        return Err(MatrixError::forbidden(
            "not allowed to send transactions on behalf of other servers",
            None,
        )
        .into());
    }

    // Enforce transaction size limits. The limit values are interpolated with
    // `format!` (previously the `{PDU_LIMIT}`/`{EDU_LIMIT}` placeholders sat
    // in plain string literals and were emitted verbatim).
    if body.pdus.len() > PDU_LIMIT {
        return Err(MatrixError::forbidden(
            format!("not allowed to send more than {PDU_LIMIT} PDUs in one transaction"),
            None,
        )
        .into());
    }
    if body.edus.len() > EDU_LIMIT {
        return Err(MatrixError::forbidden(
            format!("not allowed to send more than {EDU_LIMIT} EDUs in one transaction"),
            None,
        )
        .into());
    }

    let txn_start_time = Instant::now();
    // PDUs first; their per-event results are reported back to the sender.
    let resolved_map = process_pdus(&body.pdus, &body.origin, &txn_start_time).await?;
    // EDUs are best-effort and produce no per-item results.
    process_edus(body.edus, &body.origin).await;

    json_ok(SendMessageResBody {
        pdus: resolved_map
            .into_iter()
            .map(|(event_id, result)| (event_id, result.map_err(|e| e.to_string())))
            .collect(),
    })
}
/// Parses and processes every PDU in a transaction.
///
/// Unparseable PDUs are logged and dropped; the rest are handled in order and
/// their individual results collected per event id.
async fn process_pdus(
    pdus: &[Box<RawJsonValue>],
    origin: &ServerName,
    txn_start_time: &Instant,
) -> AppResult<BTreeMap<OwnedEventId, AppResult<()>>> {
    // Parse everything up front so malformed PDUs never reach the handler.
    let mut parsed_pdus = Vec::with_capacity(pdus.len());
    for raw_pdu in pdus {
        match parse_incoming_pdu(raw_pdu) {
            Ok(parsed) => parsed_pdus.push(parsed),
            Err(e) => warn!("could not parse pdu: {e}"),
        }
    }

    let mut resolved_map = BTreeMap::new();
    for (event_id, value, room_id, room_version_id) in parsed_pdus {
        // crate::server::check_running()?;
        let pdu_start_time = Instant::now();
        let outcome = handler::process_incoming_pdu(
            origin,
            &event_id,
            &room_id,
            &room_version_id,
            value,
            true,
            false,
        )
        .await;
        debug!(
            pdu_elapsed = ?pdu_start_time.elapsed(),
            txn_elapsed = ?txn_start_time.elapsed(),
            "finished pdu {event_id}",
        );
        resolved_map.insert(event_id, outcome);
    }

    // Surface Matrix-level failures in the log once processing is complete.
    for (event_id, outcome) in &resolved_map {
        if let Err(e) = outcome
            && matches!(e, AppError::Matrix(_))
        {
            warn!("incoming pdu failed {event_id}: {e:?}");
        }
    }

    Ok(resolved_map)
}
/// Dispatches each EDU in a transaction to its type-specific handler.
async fn process_edus(edus: Vec<Edu>, origin: &ServerName) {
    for edu in edus {
        match edu {
            Edu::Presence(content) => process_edu_presence(origin, content).await,
            Edu::Receipt(content) => process_edu_receipt(origin, content).await,
            Edu::Typing(content) => process_edu_typing(origin, content).await,
            Edu::DeviceListUpdate(content) => process_edu_device_list_update(origin, content).await,
            Edu::DirectToDevice(content) => process_edu_direct_to_device(origin, content).await,
            Edu::SigningKeyUpdate(content) => process_edu_signing_key_update(origin, content).await,
            Edu::_Custom(ref _custom) => {
                // Unknown EDU types are logged and otherwise ignored.
                warn!("received custom/unknown edu");
            }
        }
    }
}
/// Handles a presence EDU: stores each pushed presence update for users that
/// belong to the sending server.
async fn process_edu_presence(origin: &ServerName, presence: PresenceContent) {
    // Honor the local config switch for incoming presence.
    if !crate::config::get().presence.allow_incoming {
        return;
    }

    for update in presence.push {
        // Ignore updates for users the origin server does not own.
        if update.user_id.server_name() != origin {
            warn!(
                %update.user_id, %origin,
                "received presence edu for user not belonging to origin"
            );
            continue;
        }

        let presence_row = NewDbPresence {
            user_id: update.user_id.clone(),
            stream_id: None,
            state: Some(update.presence.to_string()),
            last_active_at: Some(UnixMillis::now()),
            last_federation_update_at: None,
            last_user_sync_at: None,
            currently_active: None,
            occur_sn: None,
            status_msg: update.status_msg.clone(),
        };
        // Best effort: a failed write must not abort the whole transaction.
        crate::data::user::set_presence(presence_row, true).ok();
    }
}
/// Handles a read-receipt EDU: records `m.read` receipts per room and user.
async fn process_edu_receipt(origin: &ServerName, receipt: ReceiptContent) {
    // if !crate::config::get().allow_incoming_read_receipts() {
    //     return;
    // }
    for (room_id, room_updates) in receipt {
        // Drop receipts coming from servers the room has ACL'd away.
        if handler::acl_check(origin, &room_id).is_err() {
            warn!(
                %origin, %room_id,
                "received read receipt edu from ACL'd server"
            );
            continue;
        }

        for (user_id, user_updates) in room_updates.read {
            // NOTE(review): a per-user origin check existed here but is
            // currently disabled.
            // Only accept a receipt when the user's server has at least one
            // joined member in the room.
            let server_has_member = room::joined_users(&room_id, None)
                .unwrap_or_default()
                .iter()
                .any(|member| member.server_name() == user_id.server_name());
            if !server_has_member {
                warn!(
                    %user_id, %room_id, %origin,
                    "received read receipt edu from server who does not have a member in the room",
                );
                continue;
            }

            // Persist one `m.read` receipt per referenced event.
            for event_id in &user_updates.event_ids {
                let user_receipts =
                    BTreeMap::from([(user_id.clone(), user_updates.data.clone())]);
                let receipts = BTreeMap::from([(ReceiptType::Read, user_receipts)]);
                let receipt_content = BTreeMap::from([(event_id.to_owned(), receipts)]);
                let event = ReceiptEvent {
                    content: ReceiptEventContent(receipt_content),
                    room_id: room_id.clone(),
                };
                let _ = room::receipt::update_read(&user_id, &room_id, &event, false);
            }
        }
    }
}
/// Handles a typing EDU for a single user/room pair.
async fn process_edu_typing(origin: &ServerName, typing: TypingContent) {
    // if !crate::config::get().allow_incoming_typing {
    //     return;
    // }
    // The typing user must belong to the origin server.
    if typing.user_id.server_name() != origin {
        warn!(
            %typing.user_id, %origin,
            "received typing edu for user not belonging to origin"
        );
        return;
    }
    if handler::acl_check(typing.user_id.server_name(), &typing.room_id).is_err() {
        warn!(
            %typing.user_id, %typing.room_id, %origin,
            "received typing edu for ACL'd user's server"
        );
        return;
    }
    // Only track typing state for users actually joined to the room.
    if !room::user::is_joined(&typing.user_id, &typing.room_id).unwrap_or(false) {
        warn!(
            %typing.user_id, %typing.room_id, %origin,
            "received typing edu for user not in room"
        );
        return;
    }

    if typing.typing {
        // Expire the remote typing notification after the configured
        // federation timeout (stored in seconds, converted to millis).
        let timeout = UnixMillis::now().get().saturating_add(
            crate::config::get()
                .typing
                .federation_timeout
                .saturating_mul(1000),
        );
        let _ = room::typing::add_typing(&typing.user_id, &typing.room_id, timeout, false).await;
    } else {
        let _ = room::typing::remove_typing(&typing.user_id, &typing.room_id, false).await;
    }
}
/// Handles a device-list-update EDU by marking the user's device keys as
/// changed so local clients re-fetch them.
async fn process_edu_device_list_update(origin: &ServerName, content: DeviceListUpdateContent) {
    let DeviceListUpdateContent {
        user_id, device_id, ..
    } = content;

    // Only the user's own server may announce device list changes.
    if user_id.server_name() == origin {
        let _ = crate::user::mark_device_key_update(&user_id, &device_id);
    } else {
        warn!(
            %user_id, %origin,
            "received device list update edu for user not belonging to origin"
        );
    }
}
/// Handles an `m.direct_to_device` EDU: fans out to-device messages from a
/// remote sender to the targeted local users/devices, deduplicated by the
/// sender's transaction id.
async fn process_edu_direct_to_device(origin: &ServerName, content: DirectDeviceContent) {
    let DirectDeviceContent {
        sender,
        ev_type,
        message_id,
        messages,
    } = content;
    // The sending user must belong to the origin server.
    if sender.server_name() != origin {
        warn!(
            %sender, %origin,
            "received direct to device edu for user not belonging to origin"
        );
        return;
    }
    // Check if this is a new transaction id; replays are dropped silently.
    if crate::transaction_id::txn_id_exists(&message_id, &sender, None).unwrap_or_default() {
        return;
    }
    for (target_user_id, map) in &messages {
        for (target_device_id_maybe, event) in map {
            // Skip (and log) payloads that fail to deserialize.
            let Ok(event) = event
                .deserialize_as()
                .map_err(|e| error!("to-device event is invalid: {e}"))
            else {
                continue;
            };
            let ev_type = ev_type.to_string();
            match target_device_id_maybe {
                DeviceIdOrAllDevices::DeviceId(target_device_id) => {
                    // Single-device target: store once; failures are ignored.
                    let _ = data::user::device::add_to_device_event(
                        &sender,
                        target_user_id,
                        target_device_id,
                        &ev_type,
                        event,
                    );
                }
                DeviceIdOrAllDevices::AllDevices => {
                    // Wildcard target: deliver a copy to every known device of
                    // the target user. Rebind as references so the closure
                    // below can clone per device without moving the originals.
                    let (sender, ev_type, event) = (&sender, &ev_type, &event);
                    data::user::all_device_ids(target_user_id)
                        .unwrap_or_default()
                        .iter()
                        .for_each(|target_device_id| {
                            let _ = data::user::device::add_to_device_event(
                                sender,
                                target_user_id,
                                target_device_id,
                                ev_type,
                                event.clone(),
                            );
                        });
                }
            }
        }
    }
    // Save transaction id with empty data
    let _ = crate::transaction_id::add_txn_id(&message_id, &sender, None, None, None);
}
/// Handles a signing-key-update EDU: stores the user's refreshed
/// cross-signing keys if a master key is present.
async fn process_edu_signing_key_update(origin: &ServerName, content: SigningKeyUpdateContent) {
    let SigningKeyUpdateContent {
        user_id,
        master_key,
        self_signing_key,
    } = content;

    // Only the user's own server may update their signing keys.
    if user_id.server_name() != origin {
        warn!(
            %user_id, %origin,
            "received signing key update EDU from server that does not belong to user's server"
        );
        return;
    }

    // Without a master key there is nothing to store.
    let Some(master_key) = master_key else {
        return;
    };
    let _ = crate::user::add_cross_signing_keys(
        &user_id,
        &master_key,
        &self_signing_key,
        &None,
        true,
    );
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/user.rs | crates/server/src/room/user.rs | use std::collections::{BTreeMap, HashMap, HashSet};
use diesel::prelude::*;
use indexmap::IndexMap;
use crate::core::Seqnum;
use crate::core::events::push_rules::PushRulesEventContent;
use crate::core::events::room::member::MembershipState;
use crate::core::events::{AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType};
use crate::core::identifiers::*;
use crate::core::push::{AnyPushRuleRef, NewPushRule, NewSimplePushRule};
use crate::core::serde::{JsonValue, RawJson};
use crate::data::room::{DbEventPushSummary, DbRoomTag, NewDbRoomTag};
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::event::BatchToken;
use crate::{AppResult, MatrixError, OptionalExtension, exts::*};
/// Aggregated push/notification counters for one user in one room, split
/// between the room's main timeline and per-thread counters.
#[derive(Debug, Clone)]
pub struct UserNotifySummary {
    /// Notifications in the room's main timeline.
    pub notification_count: u64,
    /// Unread events in the room's main timeline.
    pub unread_count: u64,
    /// Highlight notifications in the room's main timeline.
    pub highlight_count: u64,
    /// Per-thread counters, keyed by thread id.
    pub threads: BTreeMap<OwnedEventId, ThreadPushSummary>,
}
/// Push/notification counters for a single thread within a room.
#[derive(Debug, Clone)]
pub struct ThreadPushSummary {
    /// Notifications in this thread.
    pub notification_count: u64,
    /// Unread events in this thread.
    pub unread_count: u64,
    /// Highlight notifications in this thread.
    pub highlight_count: u64,
}
impl UserNotifySummary {
    /// Total notification count: main timeline plus all threads.
    pub fn all_notification_count(&self) -> u64 {
        self.notification_count
            + self
                .threads
                .values()
                .map(|t| t.notification_count)
                .sum::<u64>()
    }

    /// Total unread count: main timeline plus all threads.
    pub fn all_unread_count(&self) -> u64 {
        // Fixed: previously summed `self.notification_count` (copy-paste from
        // `all_notification_count`) instead of the timeline unread count.
        self.unread_count + self.threads.values().map(|t| t.unread_count).sum::<u64>()
    }

    /// Total highlight count: main timeline plus all threads.
    pub fn all_highlight_count(&self) -> u64 {
        self.highlight_count
            + self
                .threads
                .values()
                .map(|t| t.highlight_count)
                .sum::<u64>()
    }
}
impl From<Vec<DbEventPushSummary>> for UserNotifySummary {
    /// Folds per-row push summaries into one aggregate: rows carrying a
    /// `thread_id` become thread entries, the rest accumulate into the
    /// room-level counters.
    fn from(summaries: Vec<DbEventPushSummary>) -> Self {
        let mut out = UserNotifySummary {
            notification_count: 0,
            unread_count: 0,
            highlight_count: 0,
            threads: BTreeMap::new(),
        };
        for row in summaries {
            match row.thread_id {
                Some(thread_id) => {
                    // A later row for the same thread replaces the earlier one.
                    out.threads.insert(
                        thread_id,
                        ThreadPushSummary {
                            notification_count: row.notification_count as u64,
                            unread_count: row.unread_count as u64,
                            highlight_count: row.highlight_count as u64,
                        },
                    );
                }
                None => {
                    out.notification_count += row.notification_count as u64;
                    out.unread_count += row.unread_count as u64;
                    out.highlight_count += row.highlight_count as u64;
                }
            }
        }
        out
    }
}
pub fn notify_summary(user_id: &UserId, room_id: &RoomId) -> AppResult<UserNotifySummary> {
let summaries = event_push_summaries::table
.filter(event_push_summaries::user_id.eq(user_id))
.filter(event_push_summaries::room_id.eq(room_id))
.load::<DbEventPushSummary>(&mut connect()?)?;
Ok(summaries.into())
}
/// Highlight count for `user_id` in `room_id`; a missing row (or any query
/// error) counts as zero.
pub fn highlight_count(user_id: &UserId, room_id: &RoomId) -> AppResult<u64> {
    let highlights: i64 = event_push_summaries::table
        .filter(event_push_summaries::room_id.eq(room_id))
        .filter(event_push_summaries::user_id.eq(user_id))
        .select(event_push_summaries::highlight_count)
        .first(&mut connect()?)
        .unwrap_or_default();
    Ok(highlights as u64)
}
/// Highest event sequence number this user has a receipt for in `room_id`;
/// defaults to 0 when there are no receipts (or the query fails).
pub fn last_read_notification(user_id: &UserId, room_id: &RoomId) -> AppResult<Seqnum> {
    let latest = event_receipts::table
        .filter(event_receipts::room_id.eq(room_id))
        .filter(event_receipts::user_id.eq(user_id))
        .order_by(event_receipts::event_sn.desc())
        .select(event_receipts::event_sn)
        .first::<Seqnum>(&mut connect()?)
        .unwrap_or_default();
    Ok(latest)
}
/// Returns the rooms shared by every user in `user_ids`.
///
/// An empty `user_ids` list yields an empty result; the output order is
/// unspecified and duplicates are removed. Previously the intersection loop
/// rebuilt two `HashSet`s per user and loaded every user's rooms up front
/// even after the intersection was already empty.
pub fn shared_rooms(user_ids: Vec<OwnedUserId>) -> AppResult<Vec<OwnedRoomId>> {
    let mut users = user_ids.into_iter();
    let Some(first_user) = users.next() else {
        return Ok(Vec::new());
    };

    // Seed the intersection with the first user's rooms.
    let mut shared: HashSet<OwnedRoomId> = room_users::table
        .filter(room_users::user_id.eq(&first_user))
        .select(room_users::room_id)
        .load::<OwnedRoomId>(&mut connect()?)?
        .into_iter()
        .collect();

    // Intersect with each remaining user's rooms; stop early once empty.
    for user_id in users {
        if shared.is_empty() {
            break;
        }
        let rooms: HashSet<OwnedRoomId> = room_users::table
            .filter(room_users::user_id.eq(&user_id))
            .select(room_users::room_id)
            .load::<OwnedRoomId>(&mut connect()?)?
            .into_iter()
            .collect();
        shared.retain(|room_id| rooms.contains(room_id));
    }

    Ok(shared.into_iter().collect())
}
/// Sequence number of the user's `join` membership event in `room_id`.
pub fn join_sn(user_id: &UserId, room_id: &RoomId) -> AppResult<Seqnum> {
    let sn = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq("join"))
        .select(room_users::event_sn)
        .first::<i64>(&mut connect()?)?;
    Ok(sn)
}
/// Depth of the event at which the user joined `room_id`.
pub fn join_depth(user_id: &UserId, room_id: &RoomId) -> AppResult<u64> {
    let join_sn = join_sn(user_id, room_id)?;
    let depth: i64 = events::table
        .filter(events::sn.eq(join_sn))
        .select(events::depth)
        .first(&mut connect()?)?;
    Ok(depth as u64)
}
/// Number of joined memberships recorded for `room_id`.
pub fn join_count(room_id: &RoomId) -> AppResult<i64> {
    room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq("join"))
        .select(room_users::user_id)
        .count()
        .get_result(&mut connect()?)
        .map_err(Into::into)
}
/// Sequence number of the user's `knock` membership event in `room_id`.
pub fn knock_sn(user_id: &UserId, room_id: &RoomId) -> AppResult<i64> {
    let sn = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq("knock"))
        .select(room_users::event_sn)
        .first::<i64>(&mut connect()?)?;
    Ok(sn)
}
/// Number of `knock` memberships recorded for `room_id`.
pub fn knock_count(room_id: &RoomId) -> AppResult<i64> {
    room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq("knock"))
        .select(room_users::user_id)
        .count()
        .get_result(&mut connect()?)
        .map_err(Into::into)
}
/// Sequence number of the user's `leave` membership event in `room_id`.
pub fn leave_sn(user_id: &UserId, room_id: &RoomId) -> AppResult<i64> {
    let sn = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq("leave"))
        .select(room_users::event_sn)
        .first::<i64>(&mut connect()?)?;
    Ok(sn)
}
/// True when any `invite` membership row exists for this user/room pair.
#[tracing::instrument]
pub fn is_invited(user_id: &UserId, room_id: &RoomId) -> AppResult<bool> {
    let invited = room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::membership.eq(MembershipState::Invite.to_string()));
    diesel_exists!(invited, &mut connect()?).map_err(Into::into)
}
/// True when any `ban` membership row exists for this user/room pair.
#[tracing::instrument]
pub fn is_banned(user_id: &UserId, room_id: &RoomId) -> AppResult<bool> {
    let banned = room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::membership.eq(MembershipState::Ban.to_string()));
    diesel_exists!(banned, &mut connect()?).map_err(Into::into)
}
/// True when the user's most recent membership row is `leave`.
///
/// A user with no membership rows at all also counts as left.
#[tracing::instrument]
pub fn is_left(user_id: &UserId, room_id: &RoomId) -> AppResult<bool> {
    let latest = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .order_by(room_users::id.desc())
        .select(room_users::membership)
        .first::<String>(&mut connect()?);
    match latest {
        Ok(membership) => Ok(membership == MembershipState::Leave.to_string()),
        Err(_) => Ok(true),
    }
}
/// True when any `knock` membership row exists for this user/room pair.
#[tracing::instrument]
pub fn is_knocked(user_id: &UserId, room_id: &RoomId) -> AppResult<bool> {
    let knocked = room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::membership.eq(MembershipState::Knock.to_string()));
    diesel_exists!(knocked, &mut connect()?).map_err(Into::into)
}
/// True when the user's most recent membership row is `join`.
///
/// A user with no membership rows at all counts as not joined.
#[tracing::instrument]
pub fn is_joined(user_id: &UserId, room_id: &RoomId) -> AppResult<bool> {
    let latest = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .order_by(room_users::id.desc())
        .select(room_users::membership)
        .first::<String>(&mut connect()?);
    match latest {
        Ok(membership) => Ok(membership == MembershipState::Join.to_string()),
        Err(_) => Ok(false),
    }
}
/// Sequence number of the event with which the user left (or was banned
/// from) `room_id`.
#[tracing::instrument]
pub fn left_sn(room_id: &RoomId, user_id: &UserId) -> AppResult<Seqnum> {
    // Both `leave` and `ban` count as having left the room.
    let left_or_banned = room_users::membership
        .eq("leave")
        .or(room_users::membership.eq("ban"));
    room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::user_id.eq(user_id))
        .filter(left_or_banned)
        .select(room_users::event_sn)
        .first::<Seqnum>(&mut connect()?)
        .map_err(Into::into)
}
/// Returns the stripped state snapshot stored on the user's `invite`
/// membership row, or an empty list when there is no invite row, the column
/// is NULL, or the query fails.
#[tracing::instrument(level = "trace")]
pub fn invite_state(
    user_id: &UserId,
    room_id: &RoomId,
) -> AppResult<Vec<RawJson<AnyStrippedStateEvent>>> {
    let state_data = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq(MembershipState::Invite.to_string()))
        .select(room_users::state_data)
        .first::<Option<JsonValue>>(&mut connect()?)
        .unwrap_or_default();
    match state_data {
        // Deserialization failure still propagates as an error.
        Some(state) => Ok(serde_json::from_value(state)?),
        None => Ok(Vec::new()),
    }
}
/// Returns the user's most recent membership state in `room_id`.
///
/// Any lookup failure (including database errors) is reported to the caller
/// as "not a member".
#[tracing::instrument(level = "trace")]
pub fn membership(user_id: &UserId, room_id: &RoomId) -> AppResult<MembershipState> {
    room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .order_by(room_users::id.desc())
        .select(room_users::membership)
        .first::<String>(&mut connect()?)
        .map(Into::into)
        .map_err(|_| {
            MatrixError::not_found(format!("User {user_id} is not a member of room {room_id}"))
                .into()
        })
}
/// Returns an iterator over all rooms a user left.
///
/// For each room where the user's membership is `leave` or `ban`, returns the
/// matching membership events as raw sync state events. With `since_tk` only
/// rows at or after the token's sequence number are included; without it,
/// forgotten rooms are excluded.
#[tracing::instrument]
pub fn left_rooms(
    user_id: &UserId,
    since_tk: Option<BatchToken>,
) -> AppResult<HashMap<OwnedRoomId, Vec<RawJson<AnySyncStateEvent>>>> {
    let query = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::membership.eq_any(vec![
            MembershipState::Leave.to_string(),
            MembershipState::Ban.to_string(),
        ]))
        .into_boxed();
    let query = if let Some(since_tk) = since_tk {
        query.filter(room_users::event_sn.ge(since_tk.event_sn()))
    } else {
        query.filter(room_users::forgotten.eq(false))
    };
    // Group the matching membership event ids by room.
    let room_event_ids = query
        .select((room_users::room_id, room_users::event_id))
        .load::<(OwnedRoomId, OwnedEventId)>(&mut connect()?)
        .map(|rows| {
            let mut map: HashMap<OwnedRoomId, Vec<OwnedEventId>> = HashMap::new();
            for (room_id, event_id) in rows {
                map.entry(room_id).or_default().push(event_id);
            }
            map
        })?;
    // Fetch each event's json; values that fail to parse are silently dropped.
    let mut room_events = HashMap::new();
    for (room_id, event_ids) in room_event_ids {
        let events = event_datas::table
            .filter(event_datas::event_id.eq_any(&event_ids))
            .select(event_datas::json_data)
            .load::<JsonValue>(&mut connect()?)?
            .into_iter()
            .filter_map(|value| RawJson::<AnySyncStateEvent>::from_value(&value).ok())
            .collect::<Vec<_>>();
        room_events.insert(room_id, events);
    }
    Ok(room_events)
}
/// All tags this user has set on `room_id`.
pub fn get_tags(user_id: &UserId, room_id: &RoomId) -> AppResult<Vec<DbRoomTag>> {
    room_tags::table
        .filter(room_tags::room_id.eq(room_id))
        .filter(room_tags::user_id.eq(user_id))
        .load::<DbRoomTag>(&mut connect()?)
        .map_err(Into::into)
}
/// Distinct joined members of `room_id` that belong to this homeserver.
pub fn local_users(room_id: &RoomId) -> AppResult<Vec<OwnedUserId>> {
    let joined = room_users::table
        .filter(room_users::room_id.eq(room_id))
        .filter(room_users::membership.eq("join"))
        .select(room_users::user_id)
        .distinct()
        .load::<OwnedUserId>(&mut connect()?)?;
    Ok(joined
        .into_iter()
        .filter(|user_id| user_id.server_name().is_local())
        .collect())
}
/// Copies the tags and direct room state from one room to another.
///
/// Rewrites the user's `m.direct` account data so every occurrence of
/// `old_room_id` becomes `new_room_id` (without duplicating it), then
/// re-creates each tag the user had on the old room for the new room.
pub fn copy_room_tags_and_direct_to_room(
    user_id: &UserId,
    old_room_id: &RoomId,
    new_room_id: &RoomId,
) -> AppResult<()> {
    // No m.direct account data: nothing to migrate.
    let Ok(mut direct_rooms) = crate::user::get_data::<IndexMap<String, Vec<OwnedRoomId>>>(
        user_id,
        None,
        &GlobalAccountDataEventType::Direct.to_string(),
    ) else {
        return Ok(());
    };

    let old_room_id = old_room_id.to_owned();
    let new_room_id = new_room_id.to_owned();
    // Keys are not needed here, only the room lists are rewritten.
    for room_ids in direct_rooms.values_mut() {
        if room_ids.contains(&old_room_id) {
            room_ids.retain(|r| r != &old_room_id);
            if !room_ids.contains(&new_room_id) {
                room_ids.push(new_room_id.clone());
            }
        }
    }
    crate::user::set_data(
        user_id,
        None,
        &GlobalAccountDataEventType::Direct.to_string(),
        serde_json::to_value(direct_rooms)?,
    )?;

    // Duplicate each old-room tag onto the new room.
    for DbRoomTag {
        user_id,
        tag,
        content,
        ..
    } in get_tags(user_id, &old_room_id)?
    {
        let new_tag = NewDbRoomTag {
            user_id,
            room_id: new_room_id.clone(),
            tag,
            content,
        };
        diesel::insert_into(room_tags::table)
            .values(&new_tag)
            .execute(&mut connect()?)?;
    }
    Ok(())
}
/// Copy all of the push rules from one room to another for a specific user.
///
/// Duplicates every enabled room-specific push rule targeting `old_room_id`
/// as a new rule targeting `new_room_id`, then writes the updated global
/// rule set back to the user's account data.
pub fn copy_push_rules_from_room_to_room(
    user_id: &UserId,
    old_room_id: &RoomId,
    new_room_id: &RoomId,
) -> AppResult<()> {
    // No push-rules account data: nothing to copy.
    let Ok(mut user_data_content) = crate::data::user::get_data::<PushRulesEventContent>(
        user_id,
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
    ) else {
        return Ok(());
    };

    let mut new_rules = vec![];
    for push_rule in user_data_content.global.iter() {
        if !push_rule.enabled() {
            continue;
        }
        match push_rule {
            AnyPushRuleRef::Room(rule) => {
                // Only copy the rule that targets the old room. Previously
                // every enabled room rule was copied regardless of which room
                // it pointed at, and `old_room_id` went unused.
                if rule.rule_id.as_str() != old_room_id.as_str() {
                    continue;
                }
                let new_rule = NewPushRule::Room(NewSimplePushRule::new(
                    new_room_id.to_owned(),
                    rule.actions.clone(),
                ));
                new_rules.push(new_rule);
            }
            // Override/content/sender/underride rules are not room-scoped
            // and are left untouched.
            _ => {}
        }
    }

    for new_rule in new_rules {
        if let Err(e) = user_data_content.global.insert(new_rule, None, None) {
            error!("failed to insert copied push rule: {}", e);
        }
    }

    crate::data::user::set_data(
        user_id,
        None,
        &GlobalAccountDataEventType::PushRules.to_string(),
        serde_json::to_value(user_data_content)?,
    )?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/pdu_metadata.rs | crates/server/src/room/pdu_metadata.rs | use diesel::prelude::*;
use palpo_core::Seqnum;
use serde::Deserialize;
use crate::AppResult;
use crate::core::Direction;
use crate::core::client::relation::RelationEventsResBody;
use crate::core::events::{TimelineEventType, relation::RelationType};
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::room::{DbEventRelation, NewDbEventRelation};
use crate::data::schema::*;
use crate::event::{BatchToken, SnPduEvent};
use crate::room::timeline;
/// Deserialization helper: pulls only `rel_type` out of an `m.relates_to`
/// object, ignoring all other fields.
#[derive(Clone, Debug, Deserialize)]
struct ExtractRelType {
    rel_type: RelationType,
}
/// Deserialization helper: pulls only the `m.relates_to` object out of an
/// event's content.
#[derive(Clone, Debug, Deserialize)]
struct ExtractRelatesToEventId {
    #[serde(rename = "m.relates_to")]
    relates_to: ExtractRelType,
}
/// Records a relation between a parent event and a child event in `room_id`.
#[tracing::instrument]
pub fn add_relation(
    room_id: &RoomId,
    event_id: &EventId,
    child_id: &EventId,
    rel_type: Option<RelationType>,
) -> AppResult<()> {
    // Resolve sequence numbers and event types for both ends of the relation.
    let (event_sn, event_ty) = crate::event::get_event_sn_and_ty(event_id)?;
    let (child_sn, child_ty) = crate::event::get_event_sn_and_ty(child_id)?;

    let relation = NewDbEventRelation {
        room_id: room_id.to_owned(),
        event_id: event_id.to_owned(),
        event_sn,
        event_ty,
        child_id: child_id.to_owned(),
        child_sn,
        child_ty,
        rel_type: rel_type.map(|v| v.to_string()),
    };
    diesel::insert_into(event_relations::table)
        .values(&relation)
        .execute(&mut connect()?)?;
    Ok(())
}
/// Pages through events relating to `target` in `room_id`, honoring the
/// optional event-type / rel-type filters, pagination tokens, direction and
/// limit, and returns them as a client relations response.
pub fn paginate_relations_with_filter(
    user_id: &UserId,
    room_id: &RoomId,
    target: &EventId,
    filter_event_type: Option<TimelineEventType>,
    filter_rel_type: Option<RelationType>,
    from: Option<&str>,
    to: Option<&str>,
    limit: Option<usize>,
    recurse: bool,
    dir: Direction,
) -> AppResult<RelationEventsResBody> {
    // Echo the caller-supplied `from` token back as `prev_batch`.
    let prev_batch = from.map(|from| from.to_string());
    // A missing `from` means "start from the live edge" for the direction.
    let from = from
        .map(|from| from.parse())
        .transpose()?
        .unwrap_or(match dir {
            Direction::Forward => BatchToken::LIVE_MIN,
            Direction::Backward => BatchToken::LIVE_MAX,
        });
    let to: Option<BatchToken> = to.map(|to| to.parse()).transpose()?;
    // Use limit or else 10, with maximum 100
    let limit = limit
        .and_then(|u| u32::try_from(u).ok())
        .map_or(10_usize, |u| u as usize)
        .min(100);
    // Spec (v1.10) recommends depth of at least 3
    let depth: u8 = if recurse { 3 } else { 1 };
    let events: Vec<_> = crate::room::pdu_metadata::get_relations(
        user_id,
        room_id,
        target,
        filter_event_type.as_ref(),
        filter_rel_type.as_ref(),
        from.event_sn(),
        to.map(|t| t.event_sn()),
        dir,
        limit,
    )?;
    // The next token points one past the last returned event in scan order.
    let next_token = match dir {
        Direction::Forward => events
            .last()
            .map(|(_, pdu)| BatchToken::new_live(pdu.event_sn + 1)),
        Direction::Backward => events
            .last()
            .map(|(_, pdu)| BatchToken::new_live(pdu.event_sn - 1)),
    };
    let events: Vec<_> = events
        .into_iter()
        .map(|(_, pdu)| pdu.to_message_like_event())
        .collect();
    Ok(RelationEventsResBody {
        chunk: events,
        next_batch: next_token.map(|t| t.to_string()),
        prev_batch,
        recursion_depth: if recurse { Some(depth.into()) } else { None },
    })
}
/// Loads events related to `event_id` in `room_id`, optionally filtered by
/// child event type and relation type, bounded by sequence numbers
/// `from`/`to`, scanned in `dir`, up to `limit` rows.
///
/// Events the user is not allowed to see are skipped, and transaction ids
/// are stripped from events sent by other users.
pub fn get_relations(
    user_id: &UserId,
    room_id: &RoomId,
    event_id: &EventId,
    child_ty: Option<&TimelineEventType>,
    rel_type: Option<&RelationType>,
    from: Seqnum,
    to: Option<Seqnum>,
    dir: Direction,
    limit: usize,
) -> AppResult<Vec<(Seqnum, SnPduEvent)>> {
    let mut query = event_relations::table
        .filter(event_relations::room_id.eq(room_id))
        .filter(event_relations::event_id.eq(event_id))
        .into_boxed();
    if let Some(child_ty) = child_ty {
        query = query.filter(event_relations::child_ty.eq(child_ty.to_string()));
    }
    if let Some(rel_type) = rel_type {
        query = query.filter(event_relations::rel_type.eq(rel_type.to_string()));
    }
    // Direction decides both the sequence-number bounds and the sort order.
    match dir {
        Direction::Forward => {
            query = query.filter(event_relations::child_sn.ge(from));
            if let Some(to) = to {
                query = query.filter(event_relations::child_sn.le(to));
            }
            query = query.order_by(event_relations::child_sn.asc());
        }
        Direction::Backward => {
            query = query.filter(event_relations::child_sn.le(from));
            if let Some(to) = to {
                query = query.filter(event_relations::child_sn.ge(to));
            }
            query = query.order_by(event_relations::child_sn.desc());
        }
    }
    let relations = query
        .limit(limit as i64)
        .load::<DbEventRelation>(&mut connect()?)?;
    let mut pdus = Vec::with_capacity(relations.len());
    for relation in relations {
        if let Ok(mut pdu) = timeline::get_pdu(&relation.child_id) {
            // Transaction ids are private to the original sender.
            if pdu.sender != user_id {
                pdu.remove_transaction_id()?;
            }
            // Visibility errors are treated as "cannot see".
            if pdu.user_can_see(user_id).unwrap_or(false) {
                pdus.push((relation.child_sn, pdu));
            }
        }
    }
    Ok(pdus)
}
// #[tracing::instrument(skip(room_id, event_ids))]
// pub fn mark_as_referenced(room_id: &RoomId, event_ids: &[OwnedEventId]) -> AppResult<()> {
// for prev in event_ids {
// let mut key = room_id.as_bytes().to_vec();
// key.extend_from_slice(prev.as_bytes());
// self.referencedevents.insert(&key, &[])?;
// }
// Ok(())
// }
// pub fn is_event_referenced(room_id: &RoomId, event_id: &EventId) -> AppResult<bool> {
// let mut key = room_id.as_bytes().to_vec();
// key.extend_from_slice(event_id.as_bytes());
// Ok(self.referencedevents.get(&key)?.is_some())
// }
/// Flags the event identified by `event_id` as soft-failed in the `events`
/// table.
#[tracing::instrument(skip(event_id))]
pub fn mark_event_soft_failed(event_id: &EventId) -> AppResult<()> {
    let mut conn = connect()?;
    let target = events::table.filter(events::id.eq(event_id));
    diesel::update(target)
        .set(events::soft_failed.eq(true))
        .execute(&mut conn)?;
    Ok(())
}
/// Reads back the `soft_failed` flag stored for `event_id`.
pub fn is_event_soft_failed(event_id: &EventId) -> AppResult<bool> {
    let mut conn = connect()?;
    let soft_failed = events::table
        .filter(events::id.eq(event_id))
        .select(events::soft_failed)
        .first::<bool>(&mut conn)?;
    Ok(soft_failed)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/lazy_loading.rs | crates/server/src/room/lazy_loading.rs | use std::collections::{HashMap, HashSet};
use std::sync::{LazyLock, Mutex};
use diesel::prelude::*;
use palpo_core::Seqnum;
use crate::AppResult;
use crate::core::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
/// In-memory staging map for lazy-loading member deliveries.
///
/// Keyed by (user, device, room, until_sn); the value is the set of member
/// user IDs whose membership events were sent to that device but not yet
/// confirmed. Entries are inserted by `lazy_load_mark_sent` and drained into
/// the `lazy_load_deliveries` table by `lazy_load_confirm_delivery`.
pub static LAZY_LOAD_WAITING: LazyLock<
    Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, Seqnum), HashSet<OwnedUserId>>>,
> = LazyLock::new(Default::default);
/// Returns whether `confirmed_user_id`'s membership has already been
/// delivered to this (user, device) pair for `room_id` via lazy loading.
#[tracing::instrument]
pub fn lazy_load_was_sent_before(
    user_id: &UserId,
    device_id: &DeviceId,
    room_id: &RoomId,
    confirmed_user_id: &UserId,
) -> AppResult<bool> {
    let delivered = lazy_load_deliveries::table
        .filter(lazy_load_deliveries::room_id.eq(room_id))
        .filter(lazy_load_deliveries::user_id.eq(user_id))
        .filter(lazy_load_deliveries::device_id.eq(device_id))
        .filter(lazy_load_deliveries::confirmed_user_id.eq(confirmed_user_id));
    diesel_exists!(delivered, &mut connect()?).map_err(Into::into)
}
/// Stages `lazy_load` member IDs as "sent but unconfirmed" for this
/// (user, device, room) pair at sync position `until_sn`.
#[tracing::instrument]
pub fn lazy_load_mark_sent(
    user_id: &UserId,
    device_id: &DeviceId,
    room_id: &RoomId,
    lazy_load: HashSet<OwnedUserId>,
    until_sn: Seqnum,
) {
    let key = (
        user_id.to_owned(),
        device_id.to_owned(),
        room_id.to_owned(),
        until_sn,
    );
    LAZY_LOAD_WAITING.lock().unwrap().insert(key, lazy_load);
}
/// Persists any staged lazy-loading deliveries for this (user, device, room)
/// pair at sync position `occur_sn`.
///
/// Entries previously staged by [`lazy_load_mark_sent`] are moved from the
/// in-memory waiting map into the `lazy_load_deliveries` table; rows that
/// already exist are left untouched (`on_conflict_do_nothing`).
#[tracing::instrument]
pub fn lazy_load_confirm_delivery(
    user_id: &UserId,
    device_id: &DeviceId,
    room_id: &RoomId,
    occur_sn: Seqnum,
) -> AppResult<()> {
    // Take the staged set out while holding the lock, then drop the guard
    // before touching the database. The previous `if let` form kept the
    // mutex guard alive for the whole insert loop, blocking other syncs
    // during DB I/O.
    let staged = LAZY_LOAD_WAITING.lock().unwrap().remove(&(
        user_id.to_owned(),
        device_id.to_owned(),
        room_id.to_owned(),
        occur_sn,
    ));
    let Some(confirmed_user_ids) = staged else {
        return Ok(());
    };
    // Reuse one connection for the whole batch instead of reconnecting for
    // every staged user.
    let mut conn = connect()?;
    for confirmed_user_id in confirmed_user_ids {
        diesel::insert_into(lazy_load_deliveries::table)
            .values((
                lazy_load_deliveries::user_id.eq(user_id),
                lazy_load_deliveries::device_id.eq(device_id),
                lazy_load_deliveries::room_id.eq(room_id),
                lazy_load_deliveries::confirmed_user_id.eq(confirmed_user_id),
            ))
            .on_conflict_do_nothing()
            .execute(&mut conn)?;
    }
    Ok(())
}
/// Forgets every recorded lazy-loading delivery for this
/// (user, device, room) triple.
#[tracing::instrument]
pub fn lazy_load_reset(user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> AppResult<()> {
    let target = lazy_load_deliveries::table
        .filter(lazy_load_deliveries::user_id.eq(user_id))
        .filter(lazy_load_deliveries::device_id.eq(device_id))
        .filter(lazy_load_deliveries::room_id.eq(room_id));
    diesel::delete(target).execute(&mut connect()?)?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/state.rs | crates/server/src/room/state.rs | use std::collections::HashMap;
use std::sync::{Arc, LazyLock, Mutex};
use diesel::prelude::*;
use indexmap::IndexMap;
use lru_cache::LruCache;
use serde::Deserialize;
use serde::de::DeserializeOwned;
mod diff;
pub use diff::*;
mod field;
pub use field::*;
mod frame;
pub use frame::*;
mod graph;
pub use graph::*;
use crate::core::events::room::history_visibility::HistoryVisibility;
use crate::core::events::room::member::{MembershipState, RoomMemberEventContent};
use crate::core::events::room::power_levels::RoomPowerLevelsEventContent;
use crate::core::events::{AnyStrippedStateEvent, StateEventType, TimelineEventType};
use crate::core::identifiers::*;
use crate::core::room::{AllowRule, JoinRule, RoomMembership};
use crate::core::room_version_rules::AuthorizationRules;
use crate::core::serde::{JsonValue, RawJson};
use crate::core::state::StateMap;
use crate::core::{EventId, OwnedEventId, RoomId, UserId};
use crate::data::room::{NewDbEventMissing, NewDbTimelineGap};
use crate::data::{connect, schema::*};
use crate::event::{PduEvent, update_frame_id, update_frame_id_by_sn};
use crate::room::timeline;
use crate::{
AppError, AppResult, MatrixError, RoomMutexGuard, SnPduEvent, membership, room, utils,
};
/// LRU cache of (origin server, state frame id) -> visibility verdict.
/// NOTE(review): the only insert site visible here is commented out in
/// `server_can_see_event`, so this cache currently appears write-never —
/// confirm whether it is populated elsewhere before relying on it.
pub static SERVER_VISIBILITY_CACHE: LazyLock<Mutex<LruCache<(OwnedServerName, i64), bool>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(100)));
/// LRU cache of (user id, state frame id) -> visibility verdict.
/// NOTE(review): no insert site is visible in this file — confirm usage.
pub static USER_VISIBILITY_CACHE: LazyLock<Mutex<LruCache<(OwnedUserId, i64), bool>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(100)));
/// Row of `room_state_deltas`: the compressed state change that frame
/// `frame_id` applies on top of its optional parent frame.
#[derive(Insertable, Identifiable, Queryable, Debug, Clone)]
#[diesel(table_name = room_state_deltas, primary_key(frame_id))]
pub struct DbRoomStateDelta {
    pub frame_id: i64,
    pub room_id: OwnedRoomId,
    // Parent frame this delta is relative to; `None` for a root frame.
    pub parent_id: Option<i64>,
    // Compressed state entries added relative to the parent frame.
    pub appended: Vec<u8>,
    // Compressed state entries removed relative to the parent frame.
    pub disposed: Vec<u8>,
}
// #[derive(Insertable, Debug, Clone)]
// #[diesel(table_name = room_state_deltas)]
// pub struct NewDbRoomStateDelta {
// pub room_id: OwnedRoomId,
// pub frame_id: i64,
// pub parent_id: Option<i64>,
// pub appended: Vec<u8>,
// pub disposed: Vec<u8>,
// }
/// Lists every room that `server_name` is recorded as participating in.
pub fn server_joined_rooms(server_name: &ServerName) -> AppResult<Vec<OwnedRoomId>> {
    let rooms = room_joined_servers::table
        .filter(room_joined_servers::server_id.eq(server_name))
        .select(room_joined_servers::room_id)
        .load::<OwnedRoomId>(&mut connect()?)?;
    Ok(rooms)
}
/// Set the room to the given state_hash and update caches.
///
/// Forces `room_id`'s current state to `frame_id`. For every event in
/// `appended`, side-effect bookkeeping is refreshed: membership records for
/// `m.room.member` events and the space-chunk cache for `m.space.child`
/// events. Events that cannot be loaded or parsed are skipped best-effort;
/// `_disposed_data` is currently unused.
pub fn force_state(
    room_id: &RoomId,
    frame_id: i64,
    appended: Arc<CompressedState>,
    _disposed_data: Arc<CompressedState>,
) -> AppResult<()> {
    // Decode event ids from the compressed entries; entries that fail to
    // split are silently dropped.
    let event_ids = appended
        .iter()
        .filter_map(|new| new.split().ok().map(|(_, id)| id))
        .collect::<Vec<_>>();
    for event_id in &event_ids {
        let pdu = match timeline::get_pdu(event_id) {
            Ok(pdu) => pdu,
            _ => continue,
        };
        match pdu.event_ty {
            TimelineEventType::RoomMember => {
                // Only the `membership` field is needed from the content.
                #[derive(Deserialize)]
                struct ExtractMembership {
                    membership: MembershipState,
                }
                let membership = match pdu.get_content::<ExtractMembership>() {
                    Ok(e) => e.membership,
                    Err(_) => continue,
                };
                // The member event's state key is the affected user's id.
                let state_key = match &pdu.state_key {
                    Some(k) => k,
                    None => continue,
                };
                let user_id = match UserId::parse(state_key) {
                    Ok(id) => id,
                    Err(_) => continue,
                };
                membership::update_membership(
                    &pdu.event_id,
                    pdu.event_sn,
                    room_id,
                    &user_id,
                    membership,
                    &pdu.sender,
                    None,
                )?;
            }
            TimelineEventType::SpaceChild => {
                // Invalidate both cached variants for this room (the bool is
                // the second cache-key component — TODO confirm its meaning).
                let mut cache = room::space::ROOM_ID_SPACE_CHUNK_CACHE.lock().unwrap();
                cache.remove(&(pdu.room_id.clone(), false));
                cache.remove(&(pdu.room_id.clone(), true));
            }
            _ => continue,
        }
    }
    set_room_state(room_id, frame_id)?;
    Ok(())
}
/// Points `room_id`'s current state at frame `frame_id`.
#[tracing::instrument]
pub fn set_room_state(room_id: &RoomId, frame_id: i64) -> AppResult<()> {
    let mut conn = connect()?;
    diesel::update(rooms::table.find(room_id))
        .set(rooms::state_frame_id.eq(frame_id))
        .execute(&mut conn)
        .map(|_| ())
        .map_err(Into::into)
}
/// Generates a new StateHash and associates it with the incoming event.
///
/// This adds all current state events (not including the incoming event).
///
/// Returns the frame id for `state_ids_compressed`. If a frame with the same
/// state hash already exists it is reused and only the event→frame link is
/// written; otherwise a new frame is created and the delta against the room's
/// previous frame is stored.
#[tracing::instrument(skip(state_ids_compressed), level = "debug")]
pub fn set_event_state(
    event_id: &EventId,
    event_sn: i64,
    room_id: &RoomId,
    state_ids_compressed: Arc<CompressedState>,
) -> AppResult<i64> {
    let prev_frame_id = get_room_frame_id(room_id, None).ok();
    // Content-address the state set by hashing its compressed entries.
    let hash_data = utils::hash_keys(state_ids_compressed.iter().map(|s| &s[..]));
    if let Ok(frame_id) = get_frame_id(room_id, &hash_data) {
        // Identical state already stored — just link the event to it.
        update_frame_id(event_id, frame_id)?;
        Ok(frame_id)
    } else {
        let frame_id = ensure_frame(room_id, hash_data)?;
        let states_parents = if let Some(prev_frame_id) = prev_frame_id {
            load_frame_info(prev_frame_id)?
        } else {
            Vec::new()
        };
        // Compute the delta against the closest parent layer: entries only
        // in the new state are appended, entries only in the parent are
        // disposed. With no parent the full state is the delta.
        let (appended, disposed) = if let Some(parent_state_info) = states_parents.last() {
            let appended: CompressedState = state_ids_compressed
                .difference(&parent_state_info.full_state)
                .copied()
                .collect();
            let disposed: CompressedState = parent_state_info
                .full_state
                .difference(&state_ids_compressed)
                .copied()
                .collect();
            (Arc::new(appended), Arc::new(disposed))
        } else {
            (state_ids_compressed, Arc::new(CompressedState::new()))
        };
        update_frame_id(event_id, frame_id)?;
        calc_and_save_state_delta(
            room_id,
            frame_id,
            appended,
            disposed,
            1_000_000,
            states_parents,
        )?;
        Ok(frame_id)
    }
}
/// Generates a new StateHash and associates it with the incoming event.
///
/// This adds all current state events (not including the incoming event).
///
/// For a state event, a new frame containing the single-event delta (the new
/// entry appended, any replaced entry disposed) is created on top of the
/// room's previous frame and its id returned. For a non-state event the
/// previous frame id is reused unchanged.
#[tracing::instrument(skip(new_pdu))]
pub fn append_to_state(new_pdu: &SnPduEvent) -> AppResult<i64> {
    let prev_frame_id = get_room_frame_id(&new_pdu.room_id, None).ok();
    if let Some(state_key) = &new_pdu.state_key {
        let states_parents = prev_frame_id.map_or_else(|| Ok(Vec::new()), load_frame_info)?;
        let field_id = ensure_field(&new_pdu.event_ty.to_string().into(), state_key)?.id;
        let new_compressed_event = CompressedEvent::new(field_id, new_pdu.event_sn);
        // Find the entry this event replaces: compressed entries are
        // prefixed with the big-endian field id.
        let replaces = states_parents
            .last()
            .map(|info| {
                info.full_state
                    .iter()
                    .find(|bytes| bytes.starts_with(&field_id.to_be_bytes()))
            })
            .unwrap_or_default();
        // The event does not change state at all — keep the previous frame.
        if Some(&new_compressed_event) == replaces {
            return prev_frame_id.ok_or_else(|| {
                MatrixError::invalid_param("Room previous point must exists.").into()
            });
        }
        // TODO: state_hash with deterministic inputs
        let mut appended = CompressedState::new();
        appended.insert(new_compressed_event);
        let mut disposed = CompressedState::new();
        if let Some(replaces) = replaces {
            disposed.insert(*replaces);
        }
        let hash_data = utils::hash_keys([new_compressed_event.as_bytes()].into_iter());
        let frame_id = ensure_frame(&new_pdu.room_id, hash_data)?;
        update_frame_id(&new_pdu.event_id, frame_id)?;
        calc_and_save_state_delta(
            &new_pdu.room_id,
            frame_id,
            Arc::new(appended),
            Arc::new(disposed),
            2,
            states_parents,
        )?;
        Ok(frame_id)
    } else {
        // Non-state events inherit the room's current frame.
        let frame_id = prev_frame_id
            .ok_or_else(|| MatrixError::invalid_param("Room previous point must exists."))?;
        update_frame_id(&new_pdu.event_id, frame_id)?;
        Ok(frame_id)
    }
}
/// Builds the stripped-state summary accompanying `event`: whichever of the
/// spec-recommended state events exist in the room's current state, followed
/// by the stripped form of `event` itself.
pub fn summary_stripped(event: &PduEvent) -> AppResult<Vec<RawJson<AnyStrippedStateEvent>>> {
    let recommended: [(&StateEventType, &str); 8] = [
        (&StateEventType::RoomCreate, ""),
        (&StateEventType::RoomJoinRules, ""),
        (&StateEventType::RoomCanonicalAlias, ""),
        (&StateEventType::RoomName, ""),
        (&StateEventType::RoomAvatar, ""),
        (&StateEventType::RoomMember, event.sender.as_str()), // Add recommended events
        (&StateEventType::RoomEncryption, ""),
        (&StateEventType::RoomTopic, ""),
    ];
    // Missing state events are simply omitted.
    let mut state: Vec<_> = recommended
        .into_iter()
        .filter_map(|(event_type, state_key)| {
            super::get_state(&event.room_id, event_type, state_key, None).ok()
        })
        .map(|pdu| pdu.to_stripped_state_event())
        .collect();
    state.push(event.to_stripped_state_event());
    Ok(state)
}
/// Returns the distinct forward extremities (latest events with no known
/// successors) recorded for `room_id`.
pub fn get_forward_extremities(room_id: &RoomId) -> AppResult<Vec<OwnedEventId>> {
    // `load` already yields a `Vec`; the previous `into_iter().collect()`
    // round-trip was a needless re-collection.
    event_forward_extremities::table
        .filter(event_forward_extremities::room_id.eq(room_id))
        .select(event_forward_extremities::event_id)
        .distinct()
        .load::<OwnedEventId>(&mut connect()?)
        .map_err(Into::into)
}
/// Replaces `room_id`'s forward-extremity set with exactly `event_ids`.
///
/// Rows not in `event_ids` are deleted and any missing ones inserted
/// (existing rows are left alone via `on_conflict_do_nothing`). The caller
/// must hold the room mutex (`_lock`) so concurrent writers cannot
/// interleave.
pub fn set_forward_extremities<'a, I>(
    room_id: &RoomId,
    event_ids: I,
    _lock: &RoomMutexGuard,
) -> AppResult<()>
where
    I: Iterator<Item = &'a EventId> + Send + 'a,
{
    let event_ids = event_ids.collect::<Vec<_>>();
    // Use a single connection for the whole update instead of reconnecting
    // for the delete plus once per inserted row.
    let mut conn = connect()?;
    diesel::delete(
        event_forward_extremities::table
            .filter(event_forward_extremities::room_id.eq(room_id))
            .filter(event_forward_extremities::event_id.ne_all(&event_ids)),
    )
    .execute(&mut conn)?;
    for event_id in event_ids {
        diesel::insert_into(event_forward_extremities::table)
            .values((
                event_forward_extremities::room_id.eq(room_id),
                event_forward_extremities::event_id.eq(event_id),
            ))
            .on_conflict_do_nothing()
            .execute(&mut conn)?;
    }
    Ok(())
}
/// Returns the distinct backward extremities (known events whose
/// predecessors are missing) recorded for `room_id`.
pub fn get_backward_extremities(room_id: &RoomId) -> AppResult<Vec<OwnedEventId>> {
    // `load` already yields a `Vec`; the previous `into_iter().collect()`
    // round-trip was a needless re-collection.
    event_backward_extremities::table
        .filter(event_backward_extremities::room_id.eq(room_id))
        .select(event_backward_extremities::event_id)
        .distinct()
        .load::<OwnedEventId>(&mut connect()?)
        .map_err(Into::into)
}
/// Maintains backward-extremity and missing-event bookkeeping after `pdu`
/// has been stored.
///
/// Outlier PDUs are recorded as backward extremities directly. For timeline
/// PDUs, the extremity/gap rows are cleared when every prev_event is present
/// as a non-outlier, or (re)created when some prev_event is missing. Finally,
/// any prev_event absent from `events` entirely is recorded in
/// `event_missings`.
pub fn update_backward_extremities(pdu: &SnPduEvent) -> AppResult<()> {
    if pdu.is_outlier {
        diesel::insert_into(event_backward_extremities::table)
            .values((
                event_backward_extremities::room_id.eq(&pdu.room_id),
                event_backward_extremities::event_id.eq(&pdu.event_id),
            ))
            .on_conflict_do_nothing()
            .execute(&mut connect()?)?;
    } else {
        // prev_events that exist locally as full (non-outlier) events.
        let existing_ids = events::table
            .filter(events::id.eq_any(&pdu.prev_events))
            .filter(events::is_outlier.eq(false))
            .select(events::id)
            .load::<OwnedEventId>(&mut connect()?)?;
        let missing_ids: Vec<OwnedEventId> = pdu
            .prev_events
            .iter()
            .filter(|id| !existing_ids.contains(id))
            .cloned()
            .collect();
        if missing_ids.is_empty() {
            // All predecessors known: this event is no longer a backward
            // extremity and closes any timeline gap recorded at its sn.
            diesel::delete(
                event_backward_extremities::table
                    .filter(event_backward_extremities::room_id.eq(&pdu.room_id))
                    .filter(event_backward_extremities::event_id.eq(&pdu.event_id)),
            )
            .execute(&mut connect()?)?;
            diesel::delete(
                timeline_gaps::table
                    .filter(timeline_gaps::room_id.eq(&pdu.room_id))
                    .filter(timeline_gaps::event_sn.eq(pdu.event_sn)),
            )
            .execute(&mut connect()?)?;
        } else {
            // Record the missing predecessors and this event itself as
            // backward extremities, and note the timeline gap.
            for event_id in &missing_ids {
                diesel::insert_into(event_backward_extremities::table)
                    .values((
                        event_backward_extremities::room_id.eq(&pdu.room_id),
                        event_backward_extremities::event_id.eq(event_id),
                    ))
                    .on_conflict_do_nothing()
                    .execute(&mut connect()?)?;
            }
            diesel::insert_into(event_backward_extremities::table)
                .values((
                    event_backward_extremities::room_id.eq(&pdu.room_id),
                    event_backward_extremities::event_id.eq(&pdu.event_id),
                ))
                .on_conflict_do_nothing()
                .execute(&mut connect()?)?;
            diesel::insert_into(timeline_gaps::table)
                .values(NewDbTimelineGap {
                    room_id: pdu.room_id.clone(),
                    event_id: pdu.event_id.clone(),
                    event_sn: pdu.event_sn,
                })
                .execute(&mut connect()?)?;
        }
    }
    // Second pass with a broader notion of "missing": prev_events absent
    // from `events` altogether (outliers count as present here, unlike the
    // non-outlier check above).
    let existing_ids = events::table
        .filter(events::id.eq_any(&pdu.prev_events))
        .select(events::id)
        .load::<OwnedEventId>(&mut connect()?)?;
    let missing_ids: Vec<OwnedEventId> = pdu
        .prev_events
        .iter()
        .filter(|id| !existing_ids.contains(id))
        .cloned()
        .collect();
    for missing_id in missing_ids {
        diesel::insert_into(event_missings::table)
            .values(NewDbEventMissing {
                room_id: pdu.room_id.clone(),
                event_id: pdu.event_id.clone(),
                event_sn: pdu.event_sn,
                missing_id: missing_id.clone(),
            })
            .on_conflict_do_nothing()
            .execute(&mut connect()?)?;
    }
    Ok(())
}
/// This fetches auth events from the current state.
///
/// Computes the auth-event types required for an event of kind `kind` sent
/// by `sender` (per `auth_rules`) and resolves each one against `room_id`'s
/// current state frame. Returns an empty map when the room has no state
/// frame yet.
#[tracing::instrument]
pub fn get_auth_events(
    room_id: &RoomId,
    kind: &TimelineEventType,
    sender: &UserId,
    state_key: Option<&str>,
    content: &serde_json::value::RawValue,
    auth_rules: &AuthorizationRules,
) -> AppResult<StateMap<SnPduEvent>> {
    let frame_id = if let Ok(current_frame_id) = get_room_frame_id(room_id, None) {
        current_frame_id
    } else {
        return Ok(HashMap::new());
    };
    let auth_types =
        crate::core::state::auth_types_for_event(kind, sender, state_key, content, auth_rules)?;
    // Map each needed (type, state_key) pair to its numeric field id; pairs
    // without a known field id cannot be in state and are skipped.
    let mut sauth_events = auth_types
        .into_iter()
        .filter_map(|(event_type, state_key)| {
            get_field_id(&event_type.to_string().into(), &state_key)
                .ok()
                .map(|field_id| (field_id, (event_type, state_key)))
        })
        .collect::<HashMap<_, _>>();
    let full_state = load_frame_info(frame_id)?
        .pop()
        .expect("there is always one layer")
        .full_state;
    // Walk the full state once, picking out only the wanted field ids.
    let mut state_map = StateMap::new();
    for state in full_state.iter() {
        let (state_key_id, event_id) = state.split()?;
        if let Some(key) = sauth_events.remove(&state_key_id) {
            if let Ok(pdu) = timeline::get_pdu(&event_id) {
                state_map.insert(key, pdu);
            } else {
                tracing::warn!("pdu is not found: {}", event_id);
            }
        }
    }
    Ok(state_map)
}
/// Builds a StateMap by iterating over all keys that start
/// with state_hash, this gives the full state for the given state_hash.
///
/// Resolves frame `frame_id` into a map of state-field id -> event id.
pub fn get_full_state_ids(frame_id: i64) -> AppResult<IndexMap<i64, OwnedEventId>> {
    let full_state = load_frame_info(frame_id)?
        .pop()
        .expect("there is always one layer")
        .full_state;
    let mut map = IndexMap::new();
    for compressed in full_state.iter() {
        let (field_id, event_id) = compressed.split()?;
        map.insert(field_id, event_id);
    }
    Ok(map)
}
/// Resolves frame `frame_id` into its full state as a map of
/// (event type, state key) -> PDU. Entries whose PDU cannot be loaded are
/// skipped; a state PDU without a state key is an error.
pub fn get_full_state(frame_id: i64) -> AppResult<IndexMap<(StateEventType, String), SnPduEvent>> {
    let full_state = load_frame_info(frame_id)?
        .pop()
        .expect("there is always one layer")
        .full_state;
    let mut result = IndexMap::new();
    for compressed in full_state.iter() {
        let (_, event_id) = compressed.split()?;
        if let Ok(pdu) = timeline::get_pdu(&event_id) {
            result.insert(
                (
                    pdu.event_ty.to_string().into(),
                    pdu.state_key
                        .as_ref()
                        .ok_or_else(|| {
                            // Every event reachable from a state frame must
                            // carry a state key; treat its absence as a bug.
                            error!("state event has no state key: {:?}", pdu);
                            AppError::public("state event has no state key")
                        })?
                        .clone(),
                ),
                pdu,
            );
        }
    }
    Ok(result)
}
/// Returns the event id holding state (`event_type`, `state_key`) in the
/// state frame `frame_id`, or a not-found error when the frame has no such
/// entry.
pub fn get_state_event_id(
    frame_id: i64,
    event_type: &StateEventType,
    state_key: &str,
) -> AppResult<OwnedEventId> {
    let state_key_id = get_field_id(event_type, state_key)?;
    let full_state = load_frame_info(frame_id)?
        .pop()
        .expect("there is always one layer")
        .full_state;
    full_state
        .iter()
        // Compressed entries are prefixed with the big-endian field id.
        .find(|bytes| bytes.starts_with(&state_key_id.to_be_bytes()))
        .and_then(|compressed| compressed.split().ok().map(|(_, id)| id))
        // `ok_or_else` so the error value is only constructed on the miss
        // path (the previous `ok_or` built it eagerly on every call).
        .ok_or_else(|| MatrixError::not_found("state event not found").into())
}
/// Fetches the PDU holding state (`event_type`, `state_key`) in frame
/// `frame_id`.
pub fn get_state(
    frame_id: i64,
    event_type: &StateEventType,
    state_key: &str,
) -> AppResult<SnPduEvent> {
    timeline::get_pdu(&get_state_event_id(frame_id, event_type, state_key)?)
}
/// Fetches the state event at (`event_type`, `state_key`) in frame
/// `frame_id` and deserializes its content as `T`.
pub fn get_state_content<T>(
    frame_id: i64,
    event_type: &StateEventType,
    state_key: &str,
) -> AppResult<T>
where
    T: DeserializeOwned,
{
    let pdu = get_state(frame_id, event_type, state_key)?;
    let content = pdu.get_content()?;
    Ok(content)
}
/// Get membership for given user in state
pub fn user_membership(frame_id: i64, user_id: &UserId) -> AppResult<MembershipState> {
    let content: RoomMemberEventContent =
        get_state_content(frame_id, &StateEventType::RoomMember, user_id.as_str())?;
    Ok(content.membership)
}
/// The user was a joined member at this state (potentially in the past).
/// Missing membership defaults to `false`.
pub fn user_was_joined(frame_id: i64, user_id: &UserId) -> bool {
    matches!(
        user_membership(frame_id, user_id),
        Ok(MembershipState::Join)
    )
}
/// The user was an invited or joined room member at this state (potentially
/// in the past). Missing membership defaults to `false`.
pub fn user_was_invited(frame_id: i64, user_id: &UserId) -> bool {
    matches!(
        user_membership(frame_id, user_id),
        Ok(MembershipState::Join | MembershipState::Invite)
    )
}
/// Checks if a given user can redact a given event
///
/// If federation is true, it allows redaction events from any user of the
/// same server as the original event sender
///
/// Redacting `m.room.create` or `m.room.server_acl` is always refused. With
/// power levels present the check is: the sender may redact others' events,
/// OR (may redact own events AND is the redacted event's sender — same
/// server suffices over federation). Without power levels, falls back to
/// comparing against the `m.room.create` sender.
pub async fn user_can_redact(
    redacts: &EventId,
    sender: &UserId,
    room_id: &RoomId,
    federation: bool,
) -> AppResult<bool> {
    let redacting_event = timeline::get_pdu(redacts);
    if redacting_event
        .as_ref()
        .is_ok_and(|pdu| pdu.event_ty == TimelineEventType::RoomCreate)
    {
        return Err(MatrixError::forbidden(
            "Redacting m.room.create is not safe, forbidding.",
            None,
        )
        .into());
    }
    if redacting_event
        .as_ref()
        .is_ok_and(|pdu| pdu.event_ty == TimelineEventType::RoomServerAcl)
    {
        return Err(MatrixError::forbidden(
            "Redacting m.room.server_acl will result in the room being inaccessible for \
             everyone (empty allow key), forbidding.",
            None,
        )
        .into());
    }
    if let Ok(power_levels) = super::get_power_levels(room_id).await {
        // NOTE: `&&` binds tighter than `||`, so this reads as
        // `can_redact_other || (can_redact_own && sender-matches)`.
        Ok(power_levels.user_can_redact_event_of_other(sender)
            || power_levels.user_can_redact_own_event(sender)
                && if let Ok(redacting_event) = redacting_event {
                    if federation {
                        redacting_event.sender.server_name() == sender.server_name()
                    } else {
                        redacting_event.sender == sender
                    }
                } else {
                    false
                })
    } else {
        // Falling back on m.room.create to judge power level
        if let Ok(room_create) = super::get_state(room_id, &StateEventType::RoomCreate, "", None) {
            Ok(room_create.sender == sender
                || redacting_event
                    .as_ref()
                    .is_ok_and(|redacting_event| redacting_event.sender == sender))
        } else {
            Err(AppError::public(
                "No m.room.power_levels or m.room.create events in database for room",
            ))
        }
    }
}
/// Whether a server is allowed to see an event through federation, based on
/// the room's history_visibility at that event's state.
///
/// Events with no known state frame are treated as visible. For `invited`
/// visibility, any member from `origin` who was at least invited (or joined)
/// at the event's state suffices; for `joined`, a joined member is required.
/// Unknown visibility values deny access.
#[tracing::instrument(skip(origin, room_id, event_id))]
pub fn server_can_see_event(
    origin: &ServerName,
    room_id: &RoomId,
    event_id: &EventId,
) -> AppResult<bool> {
    let frame_id = match get_pdu_frame_id(event_id) {
        Ok(frame_id) => frame_id,
        Err(_) => return Ok(true),
    };
    let history_visibility = super::get_history_visibility(room_id)?;
    let visibility = match history_visibility {
        HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true,
        HistoryVisibility::Invited => {
            // Allow if any member on requesting server was AT LEAST invited, else deny
            room::invited_users(room_id, None)?
                .into_iter()
                .filter(|member| member.server_name() == origin)
                .any(|member| user_was_invited(frame_id, &member))
                || room::joined_users(room_id, None)?
                    .into_iter()
                    .filter(|member| member.server_name() == origin)
                    .any(|member| user_was_joined(frame_id, &member))
        }
        HistoryVisibility::Joined => {
            // Allow if any member on requested server was joined, else deny
            room::joined_users(room_id, None)?
                .into_iter()
                .filter(|member| member.server_name() == origin)
                .any(|member| user_was_joined(frame_id, &member))
        }
        _ => {
            error!("Unknown history visibility {history_visibility}");
            false
        }
    };
    // SERVER_VISIBILITY_CACHE
    //     .lock()
    //     .unwrap()
    //     .insert((origin.to_owned(), frame_id), visibility);
    Ok(visibility)
}
/// Whether `origin` shares at least one room with `user_id` (the user is
/// joined to some room the server participates in).
#[tracing::instrument(skip(origin, user_id))]
pub fn server_can_see_user(origin: &ServerName, user_id: &UserId) -> AppResult<bool> {
    let rooms = server_joined_rooms(origin)?;
    let visible = rooms
        .iter()
        .any(|room_id| super::user::is_joined(user_id, room_id).unwrap_or(false));
    Ok(visible)
}
/// Whether `sender_id` and `user_id` share at least one room.
#[tracing::instrument(skip(sender_id, user_id))]
pub fn user_can_see_user(sender_id: &UserId, user_id: &UserId) -> AppResult<bool> {
    let shared = super::user::shared_rooms(vec![sender_id.to_owned(), user_id.to_owned()])?;
    Ok(!shared.is_empty())
}
/// Whether a user is allowed to see an event, based on
/// the room's history_visibility at that event's state.
#[tracing::instrument(skip(user_id, event_id))]
pub fn user_can_see_event(user_id: &UserId, event_id: &EventId) -> AppResult<bool> {
    timeline::get_pdu(event_id)?.user_can_see(user_id)
}
/// Whether a user is allowed to see events in `room_id`: always for joined
/// members and world-readable rooms, for invited members when visibility is
/// `invited`, otherwise denied.
#[tracing::instrument(skip(user_id, room_id))]
pub fn user_can_see_events(user_id: &UserId, room_id: &RoomId) -> AppResult<bool> {
    if super::user::is_joined(user_id, room_id)? {
        return Ok(true);
    }
    match super::get_history_visibility(room_id)? {
        HistoryVisibility::WorldReadable => Ok(true),
        HistoryVisibility::Invited => super::user::is_invited(user_id, room_id),
        _ => Ok(false),
    }
}
/// Returns the new state_hash, and the state diff from the previous room state
///
/// Stores (or reuses) a state frame holding exactly `new_compressed_events`
/// for `room_id`. When the frame matches the room's current frame the delta
/// is empty; otherwise the appended/disposed sets against the previous frame
/// are computed and, for a newly created frame, persisted.
pub fn save_state(
    room_id: &RoomId,
    new_compressed_events: Arc<CompressedState>,
) -> AppResult<DeltaInfo> {
    let prev_frame_id = get_room_frame_id(room_id, None).ok();
    // Content-address the state set; an existing frame with the same hash is
    // reused rather than re-created.
    let hash_data = utils::hash_keys(new_compressed_events.iter().map(|bytes| &bytes[..]));
    let (new_frame_id, frame_existed) = if let Ok(frame_id) = get_frame_id(room_id, &hash_data) {
        (frame_id, true)
    } else {
        let frame_id = ensure_frame(room_id, hash_data)?;
        (frame_id, false)
    };
    // State unchanged — nothing appended or disposed.
    if Some(new_frame_id) == prev_frame_id {
        return Ok(DeltaInfo {
            frame_id: new_frame_id,
            appended: Arc::new(CompressedState::new()),
            disposed: Arc::new(CompressedState::new()),
        });
    }
    for new_compressed_event in new_compressed_events.iter() {
        update_frame_id_by_sn(new_compressed_event.event_sn(), new_frame_id)?;
    }
    let states_parents = prev_frame_id.map_or_else(|| Ok(Vec::new()), load_frame_info)?;
    // Diff against the closest parent layer; with no parent the full state
    // counts as appended.
    let (appended, disposed) = if let Some(parent_state_info) = states_parents.last() {
        let appended: CompressedState = new_compressed_events
            .difference(&parent_state_info.full_state)
            .copied()
            .collect();
        let disposed: CompressedState = parent_state_info
            .full_state
            .difference(&new_compressed_events)
            .copied()
            .collect();
        (Arc::new(appended), Arc::new(disposed))
    } else {
        (new_compressed_events, Arc::new(CompressedState::new()))
    };
    // Only persist the delta for frames that did not already exist.
    if !frame_existed {
        calc_and_save_state_delta(
            room_id,
            new_frame_id,
            appended.clone(),
            disposed.clone(),
            2, // every state change is 2 event changes on average
            states_parents,
        )?;
    };
    Ok(DeltaInfo {
        frame_id: new_frame_id,
        appended,
        disposed,
    })
}
/// Loads the stripped-state snapshot stored for `user_id` in `room_id` from
/// `room_users.state_data`, if any.
#[tracing::instrument]
pub fn get_user_state(
    user_id: &UserId,
    room_id: &RoomId,
) -> AppResult<Option<Vec<RawJson<AnyStrippedStateEvent>>>> {
    let state_data = room_users::table
        .filter(room_users::user_id.eq(user_id))
        .filter(room_users::room_id.eq(room_id))
        .select(room_users::state_data)
        .first::<Option<JsonValue>>(&mut connect()?)
        .optional()?
        .flatten();
    match state_data {
        Some(value) => Ok(Some(serde_json::from_value(value)?)),
        None => Ok(None),
    }
}
/// Gets up to five servers that are likely to be in the room in the
/// distant future.
///
/// See <https://spec.matrix.org/latest/appendices/#routing>
///
/// The server of the highest-powered user (power level >= 50) is put first,
/// followed by joined servers, truncated to five total. Returns an empty
/// list when the room has no power-levels state.
#[tracing::instrument(level = "trace")]
pub fn servers_route_via(room_id: &RoomId) -> AppResult<Vec<OwnedServerName>> {
    let Ok(pdu) = super::get_state(room_id, &StateEventType::RoomPowerLevels, "", None) else {
        return Ok(Vec::new());
    };
    // Only consider the top user if they hold at least moderator power (50).
    let most_powerful_user_server = pdu
        .get_content::<RoomPowerLevelsEventContent>()?
        .users
        .iter()
        .max_by_key(|(_, power)| *power)
        .and_then(|x| (*x.1 >= 50).then_some(x))
        .map(|(user, _power)| user.server_name().to_owned());
    let mut servers: Vec<OwnedServerName> = super::joined_servers(room_id)?
        .into_iter()
        .take(5)
        .collect();
    if let Some(server) = most_powerful_user_server {
        // Prepend and re-truncate so the list stays at five entries.
        servers.insert(0, server);
        servers.truncate(5);
    }
    Ok(servers)
}
/// Returns an empty vec if not a restricted room
///
/// For `restricted` / `knock_restricted` join rules, returns the room ids
/// named by the rules' `m.room_membership` allow conditions.
pub fn allowed_room_ids(join_rule: JoinRule) -> Vec<OwnedRoomId> {
    match join_rule {
        JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) => r
            .allow
            .into_iter()
            .filter_map(|rule| match rule {
                // `room_id` is already owned here; the previous version
                // cloned it redundantly before pushing.
                AllowRule::RoomMembership(RoomMembership { room_id }) => Some(room_id),
                _ => None,
            })
            .collect(),
        _ => Vec::new(),
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/push_action.rs | crates/server/src/room/push_action.rs | use diesel::prelude::*;
use crate::AppResult;
use crate::core::Seqnum;
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::room::NewDbEventPushAction;
use crate::data::schema::*;
/// Increments per-user push summary counters for the event `event_id`:
/// `notification_count` for every user in `notifies` and `highlight_count`
/// for every user in `highlights`.
///
/// Counters are scoped to the event's thread when it has one (NULL thread
/// otherwise). Each increment is an update-then-insert upsert: if no summary
/// row was updated, a fresh row is inserted with the counter at 1.
/// NOTE(review): update-then-insert is racy without a unique constraint +
/// proper upsert, and `stream_ordering` is hard-coded to 1 (see TODOs).
pub fn increment_notification_counts(
    event_id: &EventId,
    notifies: Vec<OwnedUserId>,
    highlights: Vec<OwnedUserId>,
) -> AppResult<()> {
    // Resolve the event's room and (optional) thread once up front.
    let (room_id, thread_id) = event_points::table
        .find(event_id)
        .select((event_points::room_id, event_points::thread_id))
        .first::<(OwnedRoomId, Option<OwnedEventId>)>(&mut connect()?)?;
    for user_id in notifies {
        let rows = if let Some(thread_id) = &thread_id {
            diesel::update(
                event_push_summaries::table
                    .filter(event_push_summaries::user_id.eq(&user_id))
                    .filter(event_push_summaries::room_id.eq(&room_id))
                    .filter(event_push_summaries::thread_id.eq(thread_id)),
            )
            .set(
                event_push_summaries::notification_count
                    .eq(event_push_summaries::notification_count + 1),
            )
            .execute(&mut connect()?)?
        } else {
            diesel::update(
                event_push_summaries::table
                    .filter(event_push_summaries::user_id.eq(&user_id))
                    .filter(event_push_summaries::room_id.eq(&room_id))
                    .filter(event_push_summaries::thread_id.is_null()),
            )
            .set(
                event_push_summaries::notification_count
                    .eq(event_push_summaries::notification_count + 1),
            )
            .execute(&mut connect()?)?
        };
        // No existing summary row — create one starting at 1.
        if rows == 0 {
            diesel::insert_into(event_push_summaries::table)
                .values((
                    event_push_summaries::user_id.eq(&user_id),
                    event_push_summaries::room_id.eq(&room_id),
                    event_push_summaries::notification_count.eq(1),
                    event_push_summaries::unread_count.eq(1),
                    event_push_summaries::thread_id.eq(&thread_id),
                    event_push_summaries::stream_ordering.eq(1), // TODO: use the correct stream ordering
                ))
                .execute(&mut connect()?)?;
        }
    }
    for user_id in highlights {
        let rows = if let Some(thread_id) = &thread_id {
            diesel::update(
                event_push_summaries::table
                    .filter(event_push_summaries::user_id.eq(&user_id))
                    .filter(event_push_summaries::room_id.eq(&room_id))
                    .filter(event_push_summaries::thread_id.eq(thread_id)),
            )
            .set(
                event_push_summaries::highlight_count.eq(event_push_summaries::highlight_count + 1),
            )
            .execute(&mut connect()?)?
        } else {
            diesel::update(
                event_push_summaries::table
                    .filter(event_push_summaries::user_id.eq(&user_id))
                    .filter(event_push_summaries::room_id.eq(&room_id))
                    .filter(event_push_summaries::thread_id.is_null()),
            )
            .set(
                event_push_summaries::highlight_count.eq(event_push_summaries::highlight_count + 1),
            )
            .execute(&mut connect()?)?
        };
        // No existing summary row — create one starting at 1.
        if rows == 0 {
            diesel::insert_into(event_push_summaries::table)
                .values((
                    event_push_summaries::user_id.eq(&user_id),
                    event_push_summaries::room_id.eq(&room_id),
                    event_push_summaries::highlight_count.eq(1),
                    event_push_summaries::unread_count.eq(1),
                    event_push_summaries::thread_id.eq(&thread_id),
                    event_push_summaries::stream_ordering.eq(1), // TODO: use the correct stream ordering
                ))
                .execute(&mut connect()?)?;
        }
    }
    Ok(())
}
/// Records (or updates) a push action for `user_id` on `event_id` in
/// `room_id`, carrying the `notify`/`highlight` flags.
///
/// The event's sequence number, thread and orderings are looked up from
/// `event_points`/`events`; the actions list is currently stored empty.
#[tracing::instrument]
pub fn upsert_push_action(
    room_id: &RoomId,
    event_id: &EventId,
    user_id: &UserId,
    notify: bool,
    highlight: bool,
) -> AppResult<()> {
    // No concrete push-rule actions are persisted yet — only the flags.
    let actions: Vec<String> = vec![];
    let (event_sn, thread_id) = event_points::table
        .find(event_id)
        .select((event_points::event_sn, event_points::thread_id))
        .first::<(Seqnum, Option<OwnedEventId>)>(&mut connect()?)?;
    let (topological_ordering, stream_ordering) = events::table
        .find(event_id)
        .select((events::topological_ordering, events::stream_ordering))
        .first::<(i64, i64)>(&mut connect()?)?;
    crate::data::room::event::upsert_push_action(&NewDbEventPushAction {
        room_id: room_id.to_owned(),
        event_id: event_id.to_owned(),
        event_sn,
        user_id: user_id.to_owned(),
        profile_tag: "".to_owned(),
        actions: serde_json::to_value(actions).expect("actions is always valid"),
        topological_ordering,
        stream_ordering,
        notify,
        highlight,
        unread: false,
        thread_id,
    })?;
    Ok(())
}
/// Deletes `user_id`'s push actions in `room_id` up to and including `event_sn`.
///
/// With a `thread_id`, only actions in that thread are removed. Without one,
/// actions are removed regardless of thread — note the unthreaded branch has
/// no `thread_id.is_null()` filter, so an unthreaded read clears threaded
/// actions too. NOTE(review): presumably intentional (Matrix unthreaded
/// receipts cover all threads); confirm against the receipt spec.
pub fn remove_actions_until(
    user_id: &UserId,
    room_id: &RoomId,
    event_sn: Seqnum,
    thread_id: Option<&EventId>,
) -> AppResult<()> {
    if let Some(thread_id) = thread_id {
        // Threaded receipt: clear only this thread's actions.
        diesel::delete(
            event_push_actions::table
                .filter(event_push_actions::user_id.eq(user_id))
                .filter(event_push_actions::room_id.eq(room_id))
                .filter(event_push_actions::thread_id.eq(thread_id))
                .filter(event_push_actions::event_sn.le(event_sn)),
        )
        .execute(&mut connect()?)?;
    } else {
        // Unthreaded receipt: clear actions in every thread up to event_sn.
        diesel::delete(
            event_push_actions::table
                .filter(event_push_actions::user_id.eq(user_id))
                .filter(event_push_actions::room_id.eq(room_id))
                .filter(event_push_actions::event_sn.le(event_sn)),
        )
        .execute(&mut connect()?)?;
    }
    Ok(())
}
/// Deletes every stored push action for `user_id` in `room_id`.
pub fn remove_actions_for_room(user_id: &UserId, room_id: &RoomId) -> AppResult<()> {
    let target = event_push_actions::table
        .filter(event_push_actions::user_id.eq(user_id))
        .filter(event_push_actions::room_id.eq(room_id));
    let mut conn = connect()?;
    diesel::delete(target).execute(&mut conn)?;
    Ok(())
}
/// Recomputes `event_push_summaries` for `user_id` in `room_id` from the raw
/// rows in `event_push_actions`.
///
/// Steps: collect the distinct live thread ids, prune threaded actions and
/// summaries for threads no longer present, then recount
/// notification/highlight/unread totals per thread and for the unthreaded
/// bucket, updating (or inserting) one summary row each.
///
/// NOTE(review): the first delete on `event_push_actions` filters on
/// `ne_all(thread_ids)` where `thread_ids` was just derived from the same
/// table, so it appears to be a no-op except under concurrent writes —
/// confirm the intent.
pub fn refresh_notify_summary(user_id: &UserId, room_id: &RoomId) -> AppResult<()> {
    // Distinct non-null thread ids that still have actions for this user/room.
    let thread_ids = event_push_actions::table
        .filter(event_push_actions::user_id.eq(user_id))
        .filter(event_push_actions::room_id.eq(room_id))
        .select(event_push_actions::thread_id)
        .distinct()
        .load::<Option<OwnedEventId>>(&mut connect()?)?
        .into_iter()
        .flatten()
        .collect::<Vec<_>>();
    // Drop threaded actions whose thread is no longer in the live set.
    diesel::delete(
        event_push_actions::table
            .filter(event_push_actions::user_id.eq(user_id))
            .filter(event_push_actions::room_id.eq(room_id))
            .filter(event_push_actions::thread_id.is_not_null())
            .filter(event_push_actions::thread_id.ne_all(&thread_ids)),
    )
    .execute(&mut connect()?)?;
    // Drop stale threaded summary rows likewise.
    diesel::delete(
        event_push_summaries::table
            .filter(event_push_summaries::user_id.eq(user_id))
            .filter(event_push_summaries::room_id.eq(room_id))
            .filter(event_push_summaries::thread_id.is_not_null())
            .filter(event_push_summaries::thread_id.ne_all(&thread_ids)),
    )
    .execute(&mut connect()?)?;
    // Recount per-thread totals and upsert one summary row per thread.
    for thread_id in &thread_ids {
        let query = event_push_actions::table
            .filter(event_push_actions::user_id.eq(user_id))
            .filter(event_push_actions::room_id.eq(room_id))
            .filter(event_push_actions::thread_id.eq(thread_id));
        let notification_count = query
            .filter(event_push_actions::notify.eq(true))
            .count()
            .get_result::<i64>(&mut connect()?)?;
        let highlight_count = query
            .filter(event_push_actions::highlight.eq(true))
            .count()
            .get_result::<i64>(&mut connect()?)?;
        let unread_count = query
            .filter(event_push_actions::unread.eq(true))
            .count()
            .get_result::<i64>(&mut connect()?)?;
        // Update-then-insert upsert: insert only when no row was updated.
        let rows = diesel::update(
            event_push_summaries::table
                .filter(event_push_summaries::user_id.eq(&user_id))
                .filter(event_push_summaries::room_id.eq(&room_id))
                .filter(event_push_summaries::thread_id.eq(thread_id)),
        )
        .set((
            event_push_summaries::notification_count.eq(notification_count),
            event_push_summaries::highlight_count.eq(highlight_count),
            event_push_summaries::unread_count.eq(unread_count),
        ))
        .execute(&mut connect()?)?;
        if rows == 0 {
            diesel::insert_into(event_push_summaries::table)
                .values((
                    event_push_summaries::user_id.eq(&user_id),
                    event_push_summaries::room_id.eq(&room_id),
                    event_push_summaries::thread_id.eq(thread_id),
                    event_push_summaries::notification_count.eq(notification_count),
                    event_push_summaries::highlight_count.eq(highlight_count),
                    event_push_summaries::unread_count.eq(unread_count),
                    event_push_summaries::stream_ordering.eq(1), // TODO: use the correct stream ordering
                ))
                .execute(&mut connect()?)?;
        }
    }
    // Same recount/upsert for the unthreaded (NULL thread_id) bucket.
    let query = event_push_actions::table
        .filter(event_push_actions::user_id.eq(user_id))
        .filter(event_push_actions::room_id.eq(room_id))
        .filter(event_push_actions::thread_id.is_null());
    let notification_count = query
        .filter(event_push_actions::notify.eq(true))
        .count()
        .get_result::<i64>(&mut connect()?)?;
    let highlight_count = query
        .filter(event_push_actions::highlight.eq(true))
        .count()
        .get_result::<i64>(&mut connect()?)?;
    let unread_count = query
        .filter(event_push_actions::unread.eq(true))
        .count()
        .get_result::<i64>(&mut connect()?)?;
    let rows = diesel::update(
        event_push_summaries::table
            .filter(event_push_summaries::user_id.eq(&user_id))
            .filter(event_push_summaries::room_id.eq(&room_id))
            .filter(event_push_summaries::thread_id.is_null()),
    )
    .set((
        event_push_summaries::notification_count.eq(notification_count),
        event_push_summaries::highlight_count.eq(highlight_count),
        event_push_summaries::unread_count.eq(unread_count),
    ))
    .execute(&mut connect()?)?;
    if rows == 0 {
        diesel::insert_into(event_push_summaries::table)
            .values((
                event_push_summaries::user_id.eq(&user_id),
                event_push_summaries::room_id.eq(&room_id),
                event_push_summaries::notification_count.eq(notification_count),
                event_push_summaries::highlight_count.eq(highlight_count),
                event_push_summaries::unread_count.eq(unread_count),
                event_push_summaries::stream_ordering.eq(1), // TODO: use the correct stream ordering
            ))
            .execute(&mut connect()?)?;
    }
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/receipt.rs | crates/server/src/room/receipt.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use crate::core::UnixMillis;
use crate::core::events::receipt::{
Receipt, ReceiptContent, ReceiptData, ReceiptEvent, ReceiptEventContent, ReceiptMap,
ReceiptType, Receipts,
};
use crate::core::federation::transaction::Edu;
use crate::core::identifiers::*;
use crate::data::room::DbReceipt;
use crate::data::schema::*;
use crate::data::{connect, next_sn};
use crate::{AppResult, sending};
/// Replaces the previous read receipt.
///
/// Persists one `event_receipts` row per (event, receipt type) contained in
/// `event` for `user_id`, then optionally broadcasts a federation receipt EDU
/// for the room. Individual insert failures are logged and skipped so one bad
/// row does not drop the rest of the batch.
#[tracing::instrument]
pub fn update_read(
    user_id: &UserId,
    room_id: &RoomId,
    event: &ReceiptEvent,
    broadcast: bool,
) -> AppResult<()> {
    for (event_id, receipts) in event.content.clone() {
        // Skip events we have never stored; we need their sequence number.
        let Ok(event_sn) = crate::event::get_event_sn(&event_id) else {
            continue;
        };
        let mut conn = connect()?;
        for (receipt_ty, user_receipts) in receipts {
            if let Some(receipt) = user_receipts.get(user_id) {
                // Threaded receipts carry the thread root's event id.
                let thread_id = match &receipt.thread {
                    crate::core::events::receipt::ReceiptThread::Thread(id) => Some(id.clone()),
                    _ => None,
                };
                // Fall back to "now" when the receipt carries no timestamp.
                let receipt_at = receipt.ts.unwrap_or_else(UnixMillis::now);
                // Shadowing: `receipt` below is the DB row; the JSON payload is
                // serialized from the original (outer) receipt value.
                let receipt = DbReceipt {
                    sn: next_sn()?,
                    ty: receipt_ty.to_string(),
                    room_id: room_id.to_owned(),
                    user_id: user_id.to_owned(),
                    event_id: event_id.clone(),
                    event_sn,
                    thread_id,
                    json_data: serde_json::to_value(receipt)?,
                    receipt_at,
                };
                // Best effort: log and continue on individual insert failures.
                if let Err(e) = diesel::insert_into(event_receipts::table)
                    .values(&receipt)
                    .execute(&mut conn)
                {
                    error!("failed to insert receipt: {}", e);
                }
            }
        }
    }
    // Federation EDU covering every event id in the input.
    // NOTE(review): the EDU timestamp is `UnixMillis::now()` rather than the
    // receipt's own `ts` — confirm this is intended.
    let receipts = BTreeMap::from_iter([(
        room_id.to_owned(),
        ReceiptMap::new(BTreeMap::from_iter([(
            user_id.to_owned(),
            ReceiptData::new(
                Receipt::new(UnixMillis::now()),
                event.content.0.keys().cloned().collect(),
            ),
        )])),
    )]);
    let edu = Edu::Receipt(ReceiptContent::new(receipts));
    if broadcast {
        sending::send_edu_room(room_id, &edu)?;
    }
    Ok(())
}
/// Gets the latest private read receipt from the user in the room.
///
/// Looks up the newest `m.read.private` receipt row (by `sn`) and rebuilds a
/// [`ReceiptEventContent`] for it. Errors if the user has no private read
/// receipt in the room.
pub fn last_private_read(user_id: &UserId, room_id: &RoomId) -> AppResult<ReceiptEventContent> {
    // Newest private-read receipt row for this user/room.
    let event_id = event_receipts::table
        .filter(event_receipts::room_id.eq(room_id))
        .filter(event_receipts::user_id.eq(user_id))
        .filter(event_receipts::ty.eq(ReceiptType::ReadPrivate.to_string()))
        .order_by(event_receipts::sn.desc())
        .select(event_receipts::event_id)
        .first::<OwnedEventId>(&mut connect()?)?;
    // let room_sn = crate::room::get_room_sn(room_id)
    //     .map_err(|e| MatrixError::bad_state(format!("room does not exist in database for {room_id}: {e}")))?;
    // let pdu = timeline::get_pdu(&event_id)?;
    let user_id: OwnedUserId = user_id.to_owned();
    // Rebuild the nested event-id -> type -> user -> receipt structure.
    let content: BTreeMap<OwnedEventId, Receipts> = BTreeMap::from_iter([(
        event_id,
        BTreeMap::from_iter([(
            crate::core::events::receipt::ReceiptType::ReadPrivate,
            BTreeMap::from_iter([(
                user_id,
                crate::core::events::receipt::Receipt {
                    ts: None, // TODO: start storing the timestamp so we can return one
                    thread: crate::core::events::receipt::ReceiptThread::Unthreaded,
                },
            )]),
        )]),
    )]);
    Ok(ReceiptEventContent(content))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/alias.rs | crates/server/src/room/alias.rs | use diesel::prelude::*;
use rand::seq::SliceRandom;
use serde_json::value::to_raw_value;
use crate::core::UnixMillis;
use crate::core::appservice::query::{QueryRoomAliasReqArgs, query_room_alias_request};
use crate::core::client::room::AliasResBody;
use crate::core::events::TimelineEventType;
use crate::core::events::room::canonical_alias::RoomCanonicalAliasEventContent;
use crate::core::federation::query::directory_request;
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::DbUser;
use crate::exts::*;
use crate::room::{StateEventType, timeline};
use crate::{AppError, AppResult, GetUrlOrigin, MatrixError, PduBuilder, config};
mod remote;
use remote::remote_resolve;
/// Row type for the `room_aliases` table: a local alias → room mapping plus
/// provenance (who created it and when).
#[derive(Insertable, Identifiable, Queryable, Debug, Clone)]
#[diesel(table_name = room_aliases, primary_key(alias_id))]
pub struct DbRoomAlias {
    /// The alias itself (primary key), e.g. `#room:server`.
    pub alias_id: OwnedRoomAliasId,
    /// Room the alias points at.
    pub room_id: OwnedRoomId,
    /// User that registered the alias.
    pub created_by: OwnedUserId,
    /// Registration timestamp.
    pub created_at: UnixMillis,
}
/// Resolves a room id or alias to a concrete room id, discarding the
/// candidate-server list produced by `resolve_with_servers`.
#[inline]
pub async fn resolve(room: &RoomOrAliasId) -> AppResult<OwnedRoomId> {
    let (room_id, _servers) = resolve_with_servers(room, None).await?;
    Ok(room_id)
}
/// Resolves `room` to a room id plus a list of candidate servers.
///
/// A literal room id passes straight through with the caller-supplied
/// `servers` (or an empty list); an alias is handed to `resolve_alias`.
pub async fn resolve_with_servers(
    room: &RoomOrAliasId,
    servers: Option<Vec<OwnedServerName>>,
) -> AppResult<(OwnedRoomId, Vec<OwnedServerName>)> {
    if !room.is_room_id() {
        let alias: &RoomAliasId = room.try_into().expect("valid RoomAliasId");
        return resolve_alias(alias, servers).await;
    }
    let room_id: &RoomId = room.try_into().expect("valid RoomId");
    Ok((room_id.to_owned(), servers.unwrap_or_default()))
}
/// Resolves a room alias to a room id, locally when possible.
///
/// If the alias belongs to a remote server and the caller's server list does
/// not include us, resolution is delegated to `remote_resolve`. Otherwise we
/// try the local alias table first and fall back to asking registered
/// appservices.
#[tracing::instrument(name = "resolve")]
pub async fn resolve_alias(
    room_alias: &RoomAliasId,
    servers: Option<Vec<OwnedServerName>>,
) -> AppResult<(OwnedRoomId, Vec<OwnedServerName>)> {
    let server_name = room_alias.server_name();
    let is_local_server = server_name.is_local();
    // Lazy check: did the caller explicitly list our own server?
    let servers_contains_local = || {
        let conf = crate::config::get();
        servers
            .as_ref()
            .is_some_and(|servers| servers.contains(&conf.server_name))
    };
    if !is_local_server && !servers_contains_local() {
        return remote_resolve(room_alias, servers.unwrap_or_default()).await;
    }
    // Local path: alias table first, then appservice namespaces.
    let room_id = match resolve_local_alias(room_alias) {
        Ok(r) => r,
        Err(_) => resolve_appservice_alias(room_alias).await?,
    };
    Ok((room_id, Vec::new()))
}
/// Looks up `alias_id` in the local `room_aliases` table and returns the room
/// id it points at. Errors if the alias is unknown or the stored id is
/// malformed.
#[tracing::instrument(level = "debug")]
pub fn resolve_local_alias(alias_id: &RoomAliasId) -> AppResult<OwnedRoomId> {
    let mut conn = connect()?;
    let raw: String = room_aliases::table
        .filter(room_aliases::alias_id.eq(alias_id))
        .select(room_aliases::room_id)
        .first(&mut conn)?;
    match RoomId::parse(raw) {
        Ok(room_id) => Ok(room_id),
        Err(_) => Err(AppError::public("Room ID is invalid.")),
    }
}
/// Asks registered appservices whose alias namespace matches `room_alias`
/// whether the room exists; the appservice is expected to create/register the
/// alias as a side effect, after which we re-check the local alias table.
async fn resolve_appservice_alias(room_alias: &RoomAliasId) -> AppResult<OwnedRoomId> {
    for appservice in crate::appservice::all()?.values() {
        // Only consult appservices that claim this alias and have a URL.
        if appservice.aliases.is_match(room_alias.as_str())
            && let Some(url) = &appservice.registration.url
        {
            let request = query_room_alias_request(
                url,
                QueryRoomAliasReqArgs {
                    room_alias: room_alias.to_owned(),
                },
            )?
            .into_inner();
            // A 200 (even with empty body) means the appservice recognizes the
            // alias; it should now be resolvable locally.
            if matches!(
                crate::sending::send_appservice_request::<Option<()>>(
                    appservice.registration.clone(),
                    request
                )
                .await,
                Ok(Some(_opt_result))
            ) {
                return resolve_local_alias(room_alias)
                    .map_err(|_| MatrixError::not_found("Room does not exist.").into());
            }
        }
    }
    Err(MatrixError::not_found("resolve appservice alias not found").into())
}
/// Returns every locally-registered alias pointing at `room_id`.
pub fn local_aliases_for_room(room_id: &RoomId) -> AppResult<Vec<OwnedRoomAliasId>> {
    let mut conn = connect()?;
    let aliases = room_aliases::table
        .filter(room_aliases::room_id.eq(room_id))
        .select(room_aliases::alias_id)
        .load::<OwnedRoomAliasId>(&mut conn)?;
    Ok(aliases)
}
/// Lists every local alias as `(room_id, alias_string)` pairs.
pub fn all_local_aliases() -> AppResult<Vec<(OwnedRoomId, String)>> {
    let mut conn = connect()?;
    let rows: Vec<(OwnedRoomId, OwnedRoomAliasId)> = room_aliases::table
        .select((room_aliases::room_id, room_aliases::alias_id))
        .load(&mut conn)?;
    let mut lists = Vec::with_capacity(rows.len());
    for (room_id, alias_id) in rows {
        lists.push((room_id, alias_id.alias().to_owned()));
    }
    Ok(lists)
}
/// Returns `true` when `room_id` is the server's admin room; any failure to
/// resolve the admin room counts as `false`.
pub fn is_admin_room(room_id: &RoomId) -> bool {
    match admin_room_id() {
        Ok(admin_room_id) => admin_room_id == room_id,
        Err(_) => false,
    }
}
/// Resolves the local `#admins:<server_name>` alias to its room id.
///
/// The alias string is built from the configured server name; constructing it
/// cannot fail (hence the `expect`), but resolution fails if no admin room
/// has been created.
pub fn admin_room_id() -> AppResult<OwnedRoomId> {
    crate::room::resolve_local_alias(
        <&RoomAliasId>::try_from(format!("#admins:{}", &config::get().server_name).as_str())
            .expect("#admins:server_name is a valid room alias"),
    )
}
/// Registers a room-alias mapping. An alias that already exists is left
/// untouched (`ON CONFLICT DO NOTHING`), so this is idempotent.
pub fn set_alias(
    room_id: impl Into<OwnedRoomId>,
    alias_id: impl Into<OwnedRoomAliasId>,
    created_by: impl Into<OwnedUserId>,
) -> AppResult<()> {
    let row = DbRoomAlias {
        alias_id: alias_id.into(),
        room_id: room_id.into(),
        created_by: created_by.into(),
        created_at: UnixMillis::now(),
    };
    let mut conn = connect()?;
    diesel::insert_into(room_aliases::table)
        .values(row)
        .on_conflict_do_nothing()
        .execute(&mut conn)?;
    Ok(())
}
/// Handles a client `GET /directory/room/{alias}` lookup.
///
/// Remote aliases are resolved over federation (and the returned server list
/// shuffled to spread join load). Local aliases are checked against the alias
/// table, falling back to probing registered appservices that claim the alias
/// namespace.
pub async fn get_alias_response(room_alias: OwnedRoomAliasId) -> AppResult<AliasResBody> {
    // Remote alias: forward the directory request over federation.
    if room_alias.server_name() != config::get().server_name {
        let request =
            directory_request(&room_alias.server_name().origin().await, &room_alias)?.into_inner();
        let mut body =
            crate::sending::send_federation_request(room_alias.server_name(), request, None)
                .await?
                .json::<AliasResBody>()
                .await?;
        // Randomize candidate servers so joins don't all hit the first entry.
        body.servers.shuffle(&mut rand::rng());
        return Ok(body);
    }
    let mut room_id = None;
    match resolve_local_alias(&room_alias) {
        Ok(r) => room_id = Some(r),
        Err(_) => {
            // Not in our alias table: ask matching appservices to materialize it.
            for appservice in crate::appservice::all()?.values() {
                let url = appservice
                    .registration
                    .build_url(&format!("app/v1/rooms/{room_alias}"))?;
                if appservice.aliases.is_match(room_alias.as_str())
                    && matches!(
                        crate::sending::post(url).send::<Option<()>>().await,
                        Ok(Some(_opt_result))
                    )
                {
                    // The appservice said yes, so the alias must now resolve.
                    room_id = Some(resolve_local_alias(&room_alias).map_err(|_| {
                        AppError::public("Appservice lied to us. Room does not exist.")
                    })?);
                    break;
                }
            }
        }
    };
    let room_id = match room_id {
        Some(room_id) => room_id,
        None => return Err(MatrixError::not_found("Room with alias not found.").into()),
    };
    Ok(AliasResBody::new(
        room_id,
        vec![config::get().server_name.to_owned()],
    ))
}
/// Removes a local room alias on behalf of `user`, after permission checks.
///
/// If the room currently has a canonical-alias state event, a replacement
/// `m.room.canonical_alias` event with `alias: None` is appended first; that
/// append is best-effort (`.ok()`) so the DB deletion still happens even if
/// the state event cannot be built.
#[tracing::instrument]
pub async fn remove_alias(alias_id: &RoomAliasId, user: &DbUser) -> AppResult<()> {
    let room_id = resolve_local_alias(alias_id)?;
    let room_version = crate::room::get_version(&room_id)?;
    if user_can_remove_alias(alias_id, user).await? {
        let state_alias = super::get_canonical_alias(&room_id);
        if state_alias.is_ok() {
            // Clear the canonical alias via a new state event; failures are
            // deliberately ignored so alias removal itself still proceeds.
            timeline::build_and_append_pdu(
                PduBuilder {
                    event_type: TimelineEventType::RoomCanonicalAlias,
                    content: to_raw_value(&RoomCanonicalAliasEventContent {
                        alias: None,
                        alt_aliases: vec![], // TODO
                    })
                    .expect("We checked that alias earlier, it must be fine"),
                    state_key: Some("".to_owned()),
                    ..Default::default()
                },
                &user.id,
                &room_id,
                &room_version,
                &super::lock_state(&room_id).await,
            )
            .await
            .ok();
        }
        diesel::delete(room_aliases::table.filter(room_aliases::alias_id.eq(alias_id)))
            .execute(&mut connect()?)?;
        Ok(())
    } else {
        Err(MatrixError::forbidden("User is not permitted to remove this alias.", None).into())
    }
}
/// Decides whether `user` may delete `alias_id`.
///
/// Allowed for: the alias creator, server admins, the server's own user, any
/// user permitted by power levels to send `m.room.canonical_alias`, or —
/// absent a power-levels event — the room creator.
#[tracing::instrument]
async fn user_can_remove_alias(alias_id: &RoomAliasId, user: &DbUser) -> AppResult<bool> {
    let room_id = resolve_local_alias(alias_id)?;
    let alias = room_aliases::table
        .find(alias_id)
        .first::<DbRoomAlias>(&mut connect()?)?;
    // The creator of an alias can remove it
    if alias.created_by == user.id
        // Server admins can remove any local alias
        || user.is_admin
        // Always allow the Palpo user to remove the alias, since there may not be an admin room
        || config::server_user_id()== user.id
    {
        Ok(true)
    // Checking whether the user is able to change canonical aliases of the room
    } else if let Ok(power_levels) = super::get_power_levels(&room_id).await {
        Ok(power_levels.user_can_send_state(&user.id, StateEventType::RoomCanonicalAlias))
    // If there is no power levels event, only the room creator can change canonical aliases
    } else if let Ok(event) = super::get_state(&room_id, &StateEventType::RoomCreate, "", None) {
        Ok(event.sender == user.id)
    } else {
        error!("Room {} has no m.room.create event (VERY BAD)!", room_id);
        Err(AppError::public("Room has no m.room.create event"))
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/timeline.rs | crates/server/src/room/timeline.rs | use std::borrow::Borrow;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::sync::{LazyLock, Mutex};
use diesel::prelude::*;
use serde::Deserialize;
use crate::core::Seqnum;
use crate::core::events::push_rules::PushRulesEventContent;
use crate::core::events::room::canonical_alias::RoomCanonicalAliasEventContent;
use crate::core::events::room::encrypted::Relation;
use crate::core::events::room::member::MembershipState;
use crate::core::events::{GlobalAccountDataEventType, StateEventType, TimelineEventType};
use crate::core::identifiers::*;
use crate::core::presence::PresenceState;
use crate::core::push::{Action, Ruleset, Tweak};
use crate::core::serde::{CanonicalJsonObject, CanonicalJsonValue, JsonValue, to_canonical_object};
use crate::core::state::{Event, StateError, event_auth};
use crate::data::room::{DbEvent, DbEventData, NewDbEvent, NewDbEventEdge};
use crate::data::schema::*;
use crate::data::{connect, diesel_exists};
use crate::event::{EventHash, PduBuilder, PduEvent};
use crate::room::{push_action, state, timeline};
use crate::{
AppError, AppResult, MatrixError, RoomMutexGuard, SnPduEvent, config, data, membership, utils,
};
mod backfill;
pub mod stream;
pub mod topolo;
pub use backfill::*;
/// Per-room cache of the most recent timeline count.
/// NOTE(review): populated/read elsewhere in this module (not visible here) —
/// presumably caches the latest timeline ordinal per room to avoid recounting;
/// confirm at the call sites.
pub static LAST_TIMELINE_COUNT_CACHE: LazyLock<Mutex<HashMap<OwnedRoomId, i64>>> =
    LazyLock::new(Default::default);
// pub static PDU_CACHE: LazyLock<Mutex<LruCache<OwnedRoomId, Arc<PduEvent>>>> = LazyLock::new(Default::default);
/// Returns the earliest PDU stored for `room_id` (by event sequence number),
/// or `None` when the room has no events.
#[tracing::instrument]
pub fn first_pdu_in_room(room_id: &RoomId) -> AppResult<Option<PduEvent>> {
    let row = event_datas::table
        .filter(event_datas::room_id.eq(room_id))
        .order(event_datas::event_sn.asc())
        .select((event_datas::event_id, event_datas::json_data))
        .first::<(OwnedEventId, JsonValue)>(&mut connect()?)
        .optional()?;
    match row {
        None => Ok(None),
        Some((event_id, json)) => PduEvent::from_json_value(room_id, &event_id, json)
            .map(Some)
            .map_err(|_e| AppError::internal("invalid pdu in db")),
    }
}
/// Returns the highest event sequence number recorded for `room_id`.
///
/// NOTE(review): `user_id` is currently unused — confirm whether per-user
/// filtering (e.g. visibility) was intended. Renaming it would change the
/// `tracing::instrument` field set, so it is kept as-is.
#[tracing::instrument]
pub fn last_event_sn(user_id: &UserId, room_id: &RoomId) -> AppResult<Seqnum> {
    let event_sn = events::table
        .filter(events::room_id.eq(room_id))
        .filter(events::sn.is_not_null())
        .select(events::sn)
        .order(events::sn.desc())
        .first::<Seqnum>(&mut connect()?)?;
    Ok(event_sn)
}
/// Returns the raw canonical JSON of the event `event_id`, or `None` when it
/// is not stored.
pub fn get_pdu_json(event_id: &EventId) -> AppResult<Option<CanonicalJsonObject>> {
    let row = event_datas::table
        .filter(event_datas::event_id.eq(event_id))
        .select(event_datas::json_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?;
    match row {
        None => Ok(None),
        Some(json) => serde_json::from_value(json)
            .map(Some)
            .map_err(|_e| AppError::internal("invalid pdu in db")),
    }
}
/// Returns the pdu, but only if it is not stored as an outlier; `None` when
/// the event is unknown or an outlier.
pub fn get_non_outlier_pdu(event_id: &EventId) -> AppResult<Option<SnPduEvent>> {
    // First hit the events table, filtering outliers out up front.
    let Some((event_sn, room_id, stream_ordering)) = events::table
        .filter(events::is_outlier.eq(false))
        .filter(events::id.eq(event_id))
        .select((events::sn, events::room_id, events::stream_ordering))
        .first::<(Seqnum, OwnedRoomId, i64)>(&mut connect()?)
        .optional()?
    else {
        return Ok(None);
    };
    // Then load and decode the stored JSON payload.
    let mut pdu = event_datas::table
        .filter(event_datas::event_id.eq(event_id))
        .select(event_datas::json_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?
        .map(|json| {
            SnPduEvent::from_json_value(
                &room_id,
                event_id,
                event_sn,
                json,
                false,
                false,
                stream_ordering < 0, // negative stream ordering marks backfilled events
            )
            .map_err(|_e| AppError::internal("invalid pdu in db"))
        })
        .transpose()?;
    // NOTE(review): this re-reads the events row to fill the flags; the columns
    // could be selected in the first query above and the round-trip avoided.
    if let Some(pdu) = pdu.as_mut() {
        let event = events::table
            .filter(events::id.eq(event_id))
            .first::<DbEvent>(&mut connect()?)?;
        pdu.is_outlier = event.is_outlier;
        pdu.soft_failed = event.soft_failed;
        pdu.rejection_reason = event.rejection_reason;
    }
    Ok(pdu)
}
/// Loads a PDU by event id, enriched with its storage flags (outlier,
/// soft-failed, backfill) taken from the `events` row.
///
/// # Errors
/// Fails when the event is missing from either `events` or `event_datas`, or
/// when the stored JSON cannot be decoded.
pub fn get_pdu(event_id: &EventId) -> AppResult<SnPduEvent> {
    let event = events::table
        .filter(events::id.eq(event_id))
        .first::<DbEvent>(&mut connect()?)?;
    let (event_sn, room_id, json) = event_datas::table
        .filter(event_datas::event_id.eq(event_id))
        .select((
            event_datas::event_sn,
            event_datas::room_id,
            event_datas::json_data,
        ))
        .first::<(Seqnum, OwnedRoomId, JsonValue)>(&mut connect()?)?;
    let mut pdu = PduEvent::from_json_value(&room_id, event_id, json)
        .map_err(|_e| AppError::internal("invalid pdu in db"))?;
    pdu.rejection_reason = event.rejection_reason;
    Ok(SnPduEvent {
        pdu,
        event_sn,
        is_outlier: event.is_outlier,
        soft_failed: event.soft_failed,
        // Negative stream ordering marks events inserted via backfill.
        is_backfill: event.stream_ordering < 0,
    })
}
/// Like `get_pdu`, but also returns the raw canonical JSON object alongside
/// the decoded PDU (useful when the caller needs to re-serialize or forward
/// the original payload).
///
/// The JSON is cloned once because both the canonical object and the PDU are
/// built from the same value.
pub fn get_pdu_and_data(event_id: &EventId) -> AppResult<(SnPduEvent, CanonicalJsonObject)> {
    let event = events::table
        .filter(events::id.eq(event_id))
        .first::<DbEvent>(&mut connect()?)?;
    let (event_sn, room_id, json) = event_datas::table
        .filter(event_datas::event_id.eq(event_id))
        .select((
            event_datas::event_sn,
            event_datas::room_id,
            event_datas::json_data,
        ))
        .first::<(Seqnum, OwnedRoomId, JsonValue)>(&mut connect()?)?;
    let data = serde_json::from_value(json.clone())
        .map_err(|_e| AppError::internal("invalid pdu in db"))?;
    let mut pdu = PduEvent::from_json_value(&room_id, event_id, json)
        .map_err(|_e| AppError::internal("invalid pdu in db"))?;
    pdu.rejection_reason = event.rejection_reason;
    Ok((
        SnPduEvent {
            pdu,
            event_sn,
            is_outlier: event.is_outlier,
            soft_failed: event.soft_failed,
            // Negative stream ordering marks events inserted via backfill.
            is_backfill: event.stream_ordering < 0,
        },
        data,
    ))
}
/// Loads the PDUs from `event_ids` that exist in `room_id`, returning the
/// found PDUs together with the ids that could not be loaded.
pub fn get_may_missing_pdus(
    room_id: &RoomId,
    event_ids: &[OwnedEventId],
) -> AppResult<(Vec<SnPduEvent>, Vec<OwnedEventId>)> {
    // Ids we actually have rows for in this room.
    let known_ids: Vec<OwnedEventId> = event_datas::table
        .filter(event_datas::room_id.eq(room_id))
        .filter(event_datas::event_id.eq_any(event_ids))
        .select(event_datas::event_id)
        .load(&mut connect()?)?;
    // Start with everything missing; remove each id we manage to decode.
    let mut missing: HashSet<OwnedEventId> = event_ids.iter().cloned().collect();
    let mut pdus = Vec::with_capacity(known_ids.len());
    for event_id in known_ids {
        if let Ok(pdu) = timeline::get_pdu(&event_id) {
            missing.remove(&event_id);
            pdus.push(pdu);
        }
    }
    Ok((pdus, missing.into_iter().collect()))
}
/// Returns whether an event with `event_id` is stored; any DB error is
/// treated as "not present".
pub fn has_pdu(event_id: &EventId) -> bool {
    let Ok(mut conn) = connect() else {
        return false;
    };
    diesel_exists!(
        event_datas::table.filter(event_datas::event_id.eq(event_id)),
        &mut conn
    )
    .unwrap_or(false)
}
/// Overwrites the stored canonical JSON of `event_id` in place, keeping the
/// same event id and sequence number.
#[tracing::instrument]
pub fn replace_pdu(event_id: &EventId, pdu_json: &CanonicalJsonObject) -> AppResult<()> {
    let new_json = serde_json::to_value(pdu_json)?;
    let target = event_datas::table.filter(event_datas::event_id.eq(event_id));
    diesel::update(target)
        .set(event_datas::json_data.eq(new_json))
        .execute(&mut connect()?)?;
    // PDU_CACHE.lock().unwrap().remove(&(*pdu.event_id).to_owned());
    Ok(())
}
/// Creates a new persisted data unit and adds it to a room.
///
/// By this point the incoming event should be fully authenticated, no auth happens
/// in `append_pdu`.
///
/// Returns pdu id
#[tracing::instrument(skip_all)]
pub async fn append_pdu(
pdu: &SnPduEvent,
mut pdu_json: CanonicalJsonObject,
state_lock: &RoomMutexGuard,
) -> AppResult<()> {
let conf = crate::config::get();
// Make unsigned fields correct. This is not properly documented in the spec, but state
// events need to have previous content in the unsigned field, so clients can easily
// interpret things like membership changes
if let Some(state_key) = &pdu.state_key {
if let CanonicalJsonValue::Object(unsigned) = pdu_json
.entry("unsigned".to_owned())
.or_insert_with(|| CanonicalJsonValue::Object(Default::default()))
{
if let Ok(state_frame_id) = state::get_pdu_frame_id(&pdu.event_id)
&& let Ok(prev_state) = state::get_state(
state_frame_id - 1,
&pdu.event_ty.to_string().into(),
state_key,
)
{
unsigned.insert(
"prev_content".to_owned(),
CanonicalJsonValue::Object(
to_canonical_object(prev_state.content.clone())
.expect("event is valid, we just created it"),
),
);
unsigned.insert(
"prev_sender".to_owned(),
CanonicalJsonValue::String(prev_state.sender.to_string()),
);
unsigned.insert(
"replaces_state".to_owned(),
CanonicalJsonValue::String(prev_state.event_id.to_string()),
);
}
} else {
error!("invalid unsigned type in pdu");
}
}
let mut leaves: BTreeSet<_> = state::get_forward_extremities(&pdu.room_id)?
.into_iter()
.collect();
// Remove any forward extremities that are referenced by this incoming event's prev_events
leaves.retain(|event_id| !pdu.prev_events.contains(event_id));
if !diesel_exists!(
event_edges::table.filter(event_edges::prev_id.eq(&pdu.event_id)),
&mut connect()?
)? {
// Only add the incoming event as a forward extremity if it is not already in the DB
leaves.insert(pdu.event_id.clone());
}
state::set_forward_extremities(&pdu.room_id, leaves.iter().map(Borrow::borrow), state_lock)?;
state::update_backward_extremities(&pdu)?;
#[derive(Deserialize, Clone, Debug)]
struct ExtractEventId {
event_id: OwnedEventId,
}
#[derive(Deserialize, Clone, Debug)]
struct ExtractRelatesToEventId {
#[serde(rename = "m.relates_to")]
relates_to: ExtractEventId,
}
let mut relates_added = false;
if let Ok(content) = pdu.get_content::<ExtractRelatesTo>() {
let rel_type = content.relates_to.rel_type();
match content.relates_to {
Relation::Reply { in_reply_to } => {
// We need to do it again here, because replies don't have event_id as a top level field
super::pdu_metadata::add_relation(
&pdu.room_id,
&in_reply_to.event_id,
&pdu.event_id,
rel_type,
)?;
relates_added = true;
}
Relation::Thread(thread) => {
super::pdu_metadata::add_relation(
&pdu.room_id,
&thread.event_id,
&pdu.event_id,
rel_type,
)?;
relates_added = true;
// thread_id = Some(thread.event_id.clone());
super::thread::add_to_thread(&thread.event_id, pdu)?;
}
_ => {} // TODO: Aggregate other types
}
}
if !relates_added && let Ok(content) = pdu.get_content::<ExtractRelatesToEventId>() {
super::pdu_metadata::add_relation(
&pdu.room_id,
&content.relates_to.event_id,
&pdu.event_id,
None,
)?;
}
let sync_pdu = pdu.to_sync_room_event();
let mut notifies = Vec::new();
let mut highlights = Vec::new();
for user_id in super::get_our_real_users(&pdu.room_id)?.iter() {
// Don't notify the user of their own events
if user_id == &pdu.sender {
continue;
}
let rules_for_user = data::user::get_global_data::<PushRulesEventContent>(
user_id,
&GlobalAccountDataEventType::PushRules.to_string(),
)?
.map(|content: PushRulesEventContent| content.global)
.unwrap_or_else(|| Ruleset::server_default(user_id));
let mut highlight = false;
let mut notify = false;
if let Ok(power_levels) = crate::room::get_power_levels(pdu.room_id()).await {
for action in data::user::pusher::get_actions(
user_id,
&rules_for_user,
&power_levels,
&sync_pdu,
&pdu.room_id,
)
.await?
{
match action {
Action::Notify => notify = true,
Action::SetTweak(Tweak::Highlight(true)) => {
highlight = true;
}
_ => {}
};
}
}
if notify {
notifies.push(user_id.clone());
}
if highlight {
highlights.push(user_id.clone());
}
if let Err(e) =
push_action::upsert_push_action(&pdu.room_id, &pdu.event_id, user_id, notify, highlight)
{
error!("failed to upsert event push action: {}", e);
}
push_action::refresh_notify_summary(&pdu.sender, &pdu.room_id)?;
for push_key in data::user::pusher::get_push_keys(user_id)? {
crate::sending::send_push_pdu(&pdu.event_id, user_id, push_key)?;
}
}
match pdu.event_ty {
TimelineEventType::RoomRedaction => {
if let Some(redact_id) = &pdu.redacts {
redact_pdu(redact_id, pdu)?;
}
}
TimelineEventType::SpaceChild => {
if let Some(_state_key) = &pdu.state_key {
let mut cache = super::space::ROOM_ID_SPACE_CHUNK_CACHE.lock().unwrap();
cache.remove(&(pdu.room_id.clone(), false));
cache.remove(&(pdu.room_id.clone(), true));
}
}
TimelineEventType::RoomMember => {
if let Some(state_key) = &pdu.state_key {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
// if the state_key fails
let target_user_id = UserId::parse(state_key.clone())
.expect("This state_key was previously validated");
let content = pdu
.get_content::<ExtractMembership>()
.map_err(|_| AppError::internal("Invalid content in pdu."))?;
let stripped_state = match content.membership {
MembershipState::Invite | MembershipState::Knock => {
let state = state::summary_stripped(pdu)?;
Some(state)
}
_ => None,
};
if content.membership == MembershipState::Join {
let _ = crate::user::ping_presence(&pdu.sender, &PresenceState::Online);
}
// Update our membership info, we do this here incase a user is invited
// and immediately leaves we need the DB to record the invite event for auth
membership::update_membership(
&pdu.event_id,
pdu.event_sn,
&pdu.room_id,
&target_user_id,
content.membership,
&pdu.sender,
stripped_state,
)?;
}
}
TimelineEventType::RoomMessage => {
#[derive(Deserialize)]
struct ExtractBody {
body: Option<String>,
}
let content = pdu
.get_content::<ExtractBody>()
.map_err(|_| AppError::internal("Invalid content in pdu."))?;
if let Some(body) = content.body
&& let Ok(admin_room) = super::resolve_local_alias(
<&RoomAliasId>::try_from(format!("#admins:{}", &conf.server_name).as_str())
.expect("#admins:server_name is a valid room alias"),
)
{
let server_user = config::server_user_id();
let to_palpo = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} "))
|| body == format!("{server_user}:")
|| body == format!("{server_user}");
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as palpo
let from_palpo = pdu.sender == server_user && conf.emergency_password.is_none();
if to_palpo && !from_palpo && admin_room == pdu.room_id {
let _ = crate::admin::executor()
.command(body, Some(pdu.event_id.clone()))
.await;
}
}
}
TimelineEventType::RoomTombstone => {
#[derive(Deserialize)]
struct ExtractReplacementRoom {
replacement_room: Option<OwnedRoomId>,
}
let content = pdu
.get_content::<ExtractReplacementRoom>()
.map_err(|_| AppError::internal("invalid content in tombstone pdu"))?;
if let Some(new_room_id) = content.replacement_room {
let local_user_ids = super::user::local_users(&pdu.room_id)?;
for user_id in &local_user_ids {
super::user::copy_room_tags_and_direct_to_room(
user_id,
&pdu.room_id,
&new_room_id,
)?;
super::user::copy_push_rules_from_room_to_room(
user_id,
&pdu.room_id,
&new_room_id,
)?;
}
}
}
_ => {}
}
DbEventData {
event_id: pdu.event_id.clone(),
event_sn: pdu.event_sn,
room_id: pdu.room_id.to_owned(),
internal_metadata: None,
json_data: serde_json::to_value(&pdu_json)?,
format_version: None,
}
.save()?;
diesel::update(events::table.find(&*pdu.event_id))
.set(events::is_outlier.eq(false))
.execute(&mut connect()?)?;
for prev_id in &pdu.prev_events {
let new_edge = NewDbEventEdge {
room_id: pdu.room_id.clone(),
event_depth: pdu.depth as i64,
event_id: pdu.event_id.clone(),
event_sn: pdu.event_sn,
prev_id: prev_id.clone(),
};
if let Err(e) = new_edge.save() {
error!("failed to save event edge: {}", e);
}
}
// Update Relationships
#[derive(Deserialize, Clone, Debug)]
struct ExtractRelatesTo {
#[serde(rename = "m.relates_to")]
relates_to: Relation,
}
crate::event::search::save_pdu(pdu, &pdu_json)?;
let frame_id = state::append_to_state(pdu)?;
// We set the room state after inserting the pdu, so that we never have a moment in time
// where events in the current room state do not exist
state::set_room_state(&pdu.room_id, frame_id)?;
if let Err(e) = push_action::increment_notification_counts(&pdu.event_id, notifies, highlights)
{
error!("failed to increment notification counts: {}", e);
}
for appservice in crate::appservice::all()?.values() {
if super::appservice_in_room(&pdu.room_id, appservice)? {
crate::sending::send_pdu_appservice(appservice.registration.id.clone(), &pdu.event_id)?;
continue;
}
// If the RoomMember event has a non-empty state_key, it is targeted at someone.
// If it is our appservice user, we send this PDU to it.
if pdu.event_ty == TimelineEventType::RoomMember
&& let Some(state_key_uid) = &pdu
.state_key
.as_ref()
.and_then(|state_key| UserId::parse(state_key.as_str()).ok())
&& let Ok(appservice_uid) = UserId::parse_with_server_name(
&*appservice.registration.sender_localpart,
&conf.server_name,
)
&& state_key_uid == &appservice_uid
{
crate::sending::send_pdu_appservice(appservice.registration.id.clone(), &pdu.event_id)?;
continue;
}
let matching_users = || {
config::get().server_name == pdu.sender.server_name()
&& appservice.is_user_match(&pdu.sender)
|| pdu.event_ty == TimelineEventType::RoomMember
&& pdu.state_key.as_ref().is_some_and(|state_key| {
UserId::parse(state_key).is_ok_and(|user_id| {
config::get().server_name == user_id.server_name()
&& appservice.is_user_match(&user_id)
})
})
};
let matching_aliases = || {
super::local_aliases_for_room(&pdu.room_id)
.unwrap_or_default()
.iter()
.any(|room_alias| appservice.aliases.is_match(room_alias.as_str()))
|| if let Ok(pdu) =
super::get_state(&pdu.room_id, &StateEventType::RoomCanonicalAlias, "", None)
{
pdu.get_content::<RoomCanonicalAliasEventContent>()
.is_ok_and(|content| {
content
.alias
.is_some_and(|alias| appservice.aliases.is_match(alias.as_str()))
|| content
.alt_aliases
.iter()
.any(|alias| appservice.aliases.is_match(alias.as_str()))
})
} else {
false
}
};
if matching_aliases() || appservice.rooms.is_match(pdu.room_id.as_str()) || matching_users()
{
crate::sending::send_pdu_appservice(appservice.registration.id.clone(), &pdu.event_id)?;
}
}
Ok(())
}
/// Enforces admin-room invariants on an outgoing PDU.
///
/// Rejects events that would cripple the admins room: enabling encryption,
/// the server user leaving or being banned, or the last local admin leaving
/// or being banned.
fn check_pdu_for_admin_room(pdu: &PduEvent, sender: &UserId) -> AppResult<()> {
    let conf = crate::config::get();
    match pdu.event_type() {
        TimelineEventType::RoomEncryption => {
            warn!("Encryption is not allowed in the admins room");
            return Err(MatrixError::forbidden(
                "Encryption is not allowed in the admins room.",
                None,
            )
            .into());
        }
        TimelineEventType::RoomMember => {
            #[derive(Deserialize)]
            struct ExtractMembership {
                membership: MembershipState,
            }
            // Membership events may target another user via the state key;
            // fall back to the sender when it does not look like a user id.
            let target = pdu
                .state_key
                .clone()
                .filter(|v| v.starts_with("@"))
                .unwrap_or(sender.as_str().to_owned());
            let server_name = &conf.server_name;
            let server_user = config::server_user_id();
            let content = pdu
                .get_content::<ExtractMembership>()
                .map_err(|_| AppError::internal("invalid content in pdu."))?;
            // Counts local members of the room other than `target`; shared by
            // the Leave and Ban checks below to detect the last local admin.
            let other_local_admins = || -> AppResult<usize> {
                Ok(super::joined_users(pdu.room_id(), None)?
                    .iter()
                    .filter(|m| m.server_name() == server_name)
                    .filter(|m| m.as_str() != target)
                    .count())
            };
            if content.membership == MembershipState::Leave {
                if target == *server_user {
                    warn!("Palpo user cannot leave from admins room");
                    return Err(MatrixError::forbidden(
                        "Palpo user cannot leave from admins room.",
                        None,
                    )
                    .into());
                }
                if other_local_admins()? < 2 {
                    warn!("Last admin cannot leave from admins room");
                    return Err(MatrixError::forbidden(
                        "Last admin cannot leave from admins room.",
                        None,
                    )
                    .into());
                }
            }
            if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
                if target == *server_user {
                    warn!("Palpo user cannot be banned in admins room");
                    return Err(MatrixError::forbidden(
                        "Palpo user cannot be banned in admins room.",
                        None,
                    )
                    .into());
                }
                if other_local_admins()? < 2 {
                    warn!("Last admin cannot be banned in admins room");
                    return Err(MatrixError::forbidden(
                        "Last admin cannot be banned in admins room.",
                        None,
                    )
                    .into());
                }
            }
        }
        _ => {}
    }
    Ok(())
}
/// Creates a new persisted data unit and adds it to a room.
///
/// Hashes, signs and stores the event built from `pdu_builder`, appends it
/// to the room's timeline, and queues it for federation to all other
/// participating servers. If the room's current state already contains an
/// identical state event (same type, state key and content), that existing
/// event is returned unchanged and nothing is written.
///
/// `state_lock` must be the mutex guard serializing state changes for
/// `room_id`.
#[tracing::instrument(skip_all)]
pub async fn build_and_append_pdu(
    pdu_builder: PduBuilder,
    sender: &UserId,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    state_lock: &RoomMutexGuard,
) -> AppResult<SnPduEvent> {
    // Deduplicate: skip creating a no-op state event.
    if let Some(state_key) = &pdu_builder.state_key
        && let Ok(curr_state) = super::get_state(
            room_id,
            &pdu_builder.event_type.to_string().into(),
            state_key,
            None,
        )
        && curr_state.content.get() == pdu_builder.content.get()
    {
        return Ok(curr_state);
    }
    let (pdu, pdu_json, _event_guard) = pdu_builder
        .hash_sign_save(sender, room_id, room_version, state_lock)
        .await?;
    let room_id = &pdu.room_id;
    crate::room::ensure_room(room_id, room_version)?;
    // let conf = crate::config::get();
    // let admin_room = super::resolve_local_alias(
    //     <&RoomAliasId>::try_from(format!("#admins:{}", &conf.server_name).as_str())
    //         .expect("#admins:server_name is a valid room alias"),
    // )?;
    // The admins room has extra invariants (see check_pdu_for_admin_room).
    if crate::room::is_admin_room(room_id)? {
        check_pdu_for_admin_room(&pdu, sender)?;
    }
    append_pdu(&pdu, pdu_json, state_lock).await?;
    // In case we are kicking or banning a user, we need to inform their server of the change
    // move to append pdu
    // if pdu.event_ty == TimelineEventType::RoomMember {
    //     crate::room::update_joined_servers(&room_id)?;
    //     crate::room::update_currents(&room_id)?;
    // }
    // Fan the new event out to every other server in the room.
    let servers = super::participating_servers(room_id, false)?;
    crate::sending::send_pdu_servers(servers.into_iter(), &pdu.event_id)?;
    Ok(pdu)
}
/// Replace a PDU with the redacted form.
#[tracing::instrument(skip(reason))]
pub fn redact_pdu(event_id: &EventId, reason: &PduEvent) -> AppResult<()> {
    // TODO: Don't reserialize, keep original json
    let Ok(mut pdu) = get_pdu(event_id) else {
        // Unknown event: nothing to redact, treat as a no-op.
        return Ok(());
    };
    pdu.redact(reason)?;
    replace_pdu(event_id, &to_canonical_object(&pdu)?)?;
    // Flag the stored row as redacted and drop it from the search index.
    diesel::update(events::table.filter(events::id.eq(event_id)))
        .set(events::is_redacted.eq(true))
        .execute(&mut connect()?)?;
    diesel::delete(event_searches::table.filter(event_searches::event_id.eq(event_id)))
        .execute(&mut connect()?)?;
    Ok(())
}
/// Whether the event or any of its `prev_events` is a backward extremity,
/// i.e. the event sits next to a backward gap in the room's history.
pub fn is_event_next_to_backward_gap(event: &PduEvent) -> AppResult<bool> {
    let mut candidate_ids = Vec::with_capacity(event.prev_events.len() + 1);
    candidate_ids.extend(event.prev_events.iter().cloned());
    candidate_ids.push(event.event_id().to_owned());
    let query = event_backward_extremities::table
        .filter(event_backward_extremities::room_id.eq(event.room_id()))
        .filter(event_backward_extremities::event_id.eq_any(candidate_ids));
    Ok(diesel_exists!(query, &mut connect()?)?)
}
/// Whether the event or any of its `prev_events` is a forward extremity,
/// i.e. the event sits next to a forward gap in the room's history.
pub fn is_event_next_to_forward_gap(event: &PduEvent) -> AppResult<bool> {
    let mut candidate_ids = Vec::with_capacity(event.prev_events.len() + 1);
    candidate_ids.extend(event.prev_events.iter().cloned());
    candidate_ids.push(event.event_id().to_owned());
    let query = event_forward_extremities::table
        .filter(event_forward_extremities::room_id.eq(event.room_id()))
        .filter(event_forward_extremities::event_id.eq_any(candidate_ids));
    Ok(diesel_exists!(query, &mut connect()?)?)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/typing.rs | crates/server/src/room/typing.rs | use std::collections::BTreeMap;
use std::sync::LazyLock;
use tokio::sync::{RwLock, broadcast};
use crate::core::UnixMillis;
use crate::core::events::SyncEphemeralRoomEvent;
use crate::core::events::typing::{TypingContent, TypingEventContent};
use crate::core::federation::transaction::Edu;
use crate::core::identifiers::*;
use crate::{AppResult, IsRemoteOrLocal, data, sending};
/// Per-room map of currently-typing users to the unix-millisecond
/// timestamp at which their typing notification expires.
pub static TYPING: LazyLock<RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>> =
    LazyLock::new(Default::default); // u64 is unix timestamp of timeout
/// Per-room sequence number of the last change to the typing set.
pub static LAST_TYPING_UPDATE: LazyLock<RwLock<BTreeMap<OwnedRoomId, i64>>> =
    LazyLock::new(Default::default); // timestamp of the last change to typing users
/// Broadcast channel notified whenever any room's typing set changes.
pub static TYPING_UPDATE_SENDER: LazyLock<broadcast::Sender<OwnedRoomId>> =
    LazyLock::new(|| broadcast::channel(100).0);
/// Sets a user as typing until the timeout timestamp is reached or
/// `remove_typing` is called.
///
/// `timeout` is an absolute unix-millisecond expiry. When `broadcast` is set
/// and the user is local, a typing EDU is (best-effort) sent to the other
/// servers in the room.
pub async fn add_typing(
    user_id: &UserId,
    room_id: &RoomId,
    timeout: u64,
    broadcast: bool,
) -> AppResult<()> {
    TYPING
        .write()
        .await
        .entry(room_id.to_owned())
        .or_default()
        .insert(user_id.to_owned(), timeout);
    let event_sn = data::next_sn()?;
    LAST_TYPING_UPDATE
        .write()
        .await
        .insert(room_id.to_owned(), event_sn);
    // let current_frame_id = if let Some(s) = crate::room::get_frame_id(room_id, None)? {
    //     s
    // } else {
    //     error!("Room {} has no state", room_id);
    //     return Err(AppError::public("Room has no state"));
    // };
    // // Save the state after this sync so we can send the correct state diff next sync
    // let point_id = state::ensure_point(&room_id, &OwnedEventId::from_str(&Ulid::new().to_string())?, event_sn as i64)?;
    // state::update_frame_id(point_id, current_frame_id)?;
    // Wake any sync requests waiting on typing updates for this room.
    let _ = TYPING_UPDATE_SENDER.send(room_id.to_owned());
    if broadcast && user_id.is_local() {
        federation_send(room_id, user_id, true).await.ok();
    }
    Ok(())
}
/// Removes a user from the room's typing set before the timeout is reached.
pub async fn remove_typing(user_id: &UserId, room_id: &RoomId, broadcast: bool) -> AppResult<()> {
    {
        let mut typing = TYPING.write().await;
        typing.entry(room_id.to_owned()).or_default().remove(user_id);
    }
    let event_sn = data::next_sn()?;
    {
        let mut last_update = LAST_TYPING_UPDATE.write().await;
        last_update.insert(room_id.to_owned(), event_sn);
    }
    // Wake any sync requests waiting on typing updates for this room.
    let _ = TYPING_UPDATE_SENDER.send(room_id.to_owned());
    if broadcast && user_id.is_local() {
        // Best-effort EDU broadcast; a federation failure must not fail the call.
        federation_send(room_id, user_id, false).await.ok();
    }
    Ok(())
}
/// Blocks until the typing set of `room_id` changes (or the broadcast
/// channel closes or this receiver lags behind).
pub async fn wait_for_update(room_id: &RoomId) -> AppResult<()> {
    let mut receiver = TYPING_UPDATE_SENDER.subscribe();
    loop {
        match receiver.recv().await {
            Ok(updated_room) if updated_room == room_id => break,
            Ok(_) => continue,
            // Lagged or closed channel: stop waiting instead of erroring.
            Err(_) => break,
        }
    }
    Ok(())
}
/// Makes sure that typing events with old timestamps get removed.
async fn maintain_typings(room_id: &RoomId) -> AppResult<()> {
let current_timestamp = UnixMillis::now();
let mut removable = Vec::new();
{
let typing = TYPING.read().await;
let Some(room) = typing.get(room_id) else {
return Ok(());
};
for (user_id, timeout) in room {
if *timeout < current_timestamp.get() {
removable.push(user_id.clone());
}
}
drop(typing);
}
if !removable.is_empty() {
let typing = &mut TYPING.write().await;
let room = typing.entry(room_id.to_owned()).or_default();
for user_id in &removable {
room.remove(user_id);
}
LAST_TYPING_UPDATE
.write()
.await
.insert(room_id.to_owned(), data::next_sn()?);
let _ = TYPING_UPDATE_SENDER.send(room_id.to_owned());
for user_id in &removable {
if user_id.is_local() {
federation_send(room_id, user_id, false).await.ok();
}
}
}
Ok(())
}
/// Returns the sequence number of the last typing change in this room,
/// expiring stale entries first. Rooms with no recorded update yield `0`.
pub async fn last_typing_update(room_id: &RoomId) -> AppResult<i64> {
    // Purge timed-out entries so the returned counter reflects them.
    maintain_typings(room_id).await?;
    let guard = LAST_TYPING_UPDATE.read().await;
    let last = match guard.get(room_id) {
        Some(sn) => *sn,
        None => 0,
    };
    Ok(last)
}
/// Builds a typing ephemeral event listing everyone currently typing in
/// the room.
pub async fn all_typings(
    room_id: &RoomId,
) -> AppResult<SyncEphemeralRoomEvent<TypingEventContent>> {
    let typing = TYPING.read().await;
    let user_ids = match typing.get(room_id) {
        Some(users) => users.keys().cloned().collect(),
        None => Default::default(),
    };
    Ok(SyncEphemeralRoomEvent {
        content: TypingEventContent { user_ids },
    })
}
/// Broadcasts a typing EDU for a local user to the other servers in the
/// room (unless outgoing typing federation is disabled in the config).
async fn federation_send(room_id: &RoomId, user_id: &UserId, typing: bool) -> AppResult<()> {
    debug_assert!(
        user_id.is_local(),
        "tried to broadcast typing status of remote user",
    );
    if crate::config::get().typing.allow_outgoing {
        let edu = Edu::Typing(TypingContent::new(
            room_id.to_owned(),
            user_id.to_owned(),
            typing,
        ));
        sending::send_edu_room(room_id, &edu)?;
    }
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/auth_chain.rs | crates/server/src/room/auth_chain.rs | use std::collections::{BTreeSet, HashSet, VecDeque};
use std::fmt::Debug;
use std::sync::{Arc, LazyLock, Mutex};
use std::time::Instant;
use diesel::prelude::*;
use lru_cache::LruCache;
use crate::core::Seqnum;
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::schema::*;
use crate::room::timeline;
use crate::{AppResult, MatrixError};
/// Ordered set of (event sequence number, event id) pairs, used to spread
/// starting events across cache buckets.
type Bucket<'a> = BTreeSet<(Seqnum, &'a EventId)>;
/// In-memory LRU from a bucket key (event sequence numbers) to the combined
/// auth-chain sequence numbers for those events.
static AUTH_CHAIN_CACHE: LazyLock<Mutex<LruCache<Vec<i64>, Arc<Vec<Seqnum>>>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(100_000)));
/// Resolves the full auth chain of the given starting events to event ids,
/// ordered by sequence number.
pub fn get_auth_chain_ids<'a, I>(
    room_id: &'a RoomId,
    starting_event_ids: I,
) -> AppResult<Vec<OwnedEventId>>
where
    I: Iterator<Item = &'a EventId> + Clone + Debug + Send,
{
    // Compute the chain as sequence numbers first, then map back to ids.
    let sns = get_auth_chain_sns(room_id, starting_event_ids)?;
    events::table
        .filter(events::sn.eq_any(&sns))
        .order_by(events::sn.asc())
        .select(events::id)
        .load::<OwnedEventId>(&mut connect()?)
        .map_err(Into::into)
}
/// Resolves the combined auth chain of `starting_event_ids` as event
/// sequence numbers (sorted, deduplicated).
///
/// Starting events are spread over `NUM_BUCKETS` buckets keyed by
/// `sn % NUM_BUCKETS`; each bucket is looked up and stored in the
/// auth-chain cache as a unit, with a per-event cache fallback on bucket
/// misses.
pub fn get_auth_chain_sns<'a, I>(
    room_id: &'a RoomId,
    starting_event_ids: I,
) -> AppResult<Vec<Seqnum>>
where
    I: Iterator<Item = &'a EventId> + Clone + Debug + Send,
{
    const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db?
    const BUCKET: Bucket<'_> = BTreeSet::new();
    let started = Instant::now();
    // Map the starting event ids to (id, sn) pairs; events without a
    // sequence number are skipped.
    let starting_events = events::table
        .filter(events::id.eq_any(starting_event_ids.clone()))
        .filter(events::sn.is_not_null())
        .select((events::id, events::sn))
        .load::<(OwnedEventId, Seqnum)>(&mut connect()?)?
        .into_iter()
        .collect::<Vec<_>>();
    let mut buckets = [BUCKET; NUM_BUCKETS];
    for (event_id, event_sn) in &starting_events {
        let index = event_sn % NUM_BUCKETS as i64;
        buckets[index as usize].insert((*event_sn, event_id));
    }
    debug!(
        starting_events = ?starting_events.len(),
        elapsed = ?started.elapsed(),
        "start",
    );
    let mut full_auth_chain: Vec<Seqnum> = Vec::with_capacity(starting_events.len());
    for bucket in buckets {
        let bucket_key: Vec<Seqnum> = bucket.iter().map(|i| i.0).collect();
        if bucket_key.is_empty() {
            continue;
        }
        // Whole-bucket cache hit.
        if let Ok(Some(cached)) = get_cached_auth_chain(&bucket_key) {
            full_auth_chain.extend(cached.to_vec());
            continue;
        }
        let mut bucket_cache: Vec<_> = vec![];
        for (event_sn, event_id) in bucket {
            // Per-event cache hit (single-sn cache key).
            if let Ok(Some(cached)) = get_cached_auth_chain(&[event_sn]) {
                bucket_cache.extend(cached.to_vec());
                continue;
            }
            // Full walk of the event's auth ancestry; cache the result.
            let auth_chain = get_event_auth_chain(room_id, event_id)?;
            let _ = cache_auth_chain(vec![event_sn], auth_chain.as_slice());
            bucket_cache.extend(auth_chain);
            debug!(
                ?event_id,
                elapsed = ?started.elapsed(),
                "Cache missed event"
            );
        }
        let _ = cache_auth_chain(bucket_key, bucket_cache.as_slice());
        debug!(
            bucket_cache_length = ?bucket_cache.len(),
            elapsed = ?started.elapsed(),
            "Cache missed bucket",
        );
        full_auth_chain.extend(bucket_cache);
    }
    full_auth_chain.sort_unstable();
    full_auth_chain.dedup();
    debug!(
        chain_length = ?full_auth_chain.len(),
        elapsed = ?started.elapsed(),
        "done",
    );
    Ok(full_auth_chain)
}
/// Walks the full auth-event ancestry of `event_id` (breadth-first over
/// `auth_events`) and returns the sequence numbers of every reachable auth
/// event.
///
/// Errors if any encountered auth event belongs to a room other than
/// `room_id`.
#[tracing::instrument(level = "trace", skip(room_id))]
fn get_event_auth_chain(room_id: &RoomId, event_id: &EventId) -> AppResult<Vec<Seqnum>> {
    let mut todo: VecDeque<_> = [event_id.to_owned()].into();
    let mut found = HashSet::new();
    while let Some(event_id) = todo.pop_front() {
        trace!(?event_id, "processing auth event");
        let pdu = timeline::get_pdu(&event_id)?;
        // An auth event referencing another room is a protocol violation.
        if pdu.room_id != room_id {
            tracing::error!(
                ?event_id,
                ?room_id,
                wrong_room_id = ?pdu.room_id,
                "auth event for incorrect room"
            );
            return Err(MatrixError::forbidden("auth event for incorrect room", None).into());
        }
        let auth_events = events::table
            .filter(events::sn.is_not_null())
            .filter(events::id.eq_any(pdu.auth_events.iter().map(|e| &**e)))
            .select((events::id, events::sn))
            .load::<(OwnedEventId, Seqnum)>(&mut connect()?)?;
        for (auth_event_id, auth_event_sn) in auth_events {
            // `found` doubles as the visited set: only enqueue unseen events.
            if found.insert(auth_event_sn) {
                tracing::trace!(
                    ?auth_event_id,
                    ?auth_event_sn,
                    "adding auth event to processing queue"
                );
                todo.push_back(auth_event_id);
            }
        }
    }
    Ok(found.into_iter().collect())
}
/// Looks up the auth chain for `cache_key`, first in the in-memory LRU,
/// then in the database (populating the LRU on a database hit).
fn get_cached_auth_chain(cache_key: &[Seqnum]) -> AppResult<Option<Arc<Vec<Seqnum>>>> {
    // Fast path: RAM cache.
    if let Some(hit) = AUTH_CHAIN_CACHE.lock().unwrap().get_mut(cache_key) {
        return Ok(Some(hit.clone()));
    }
    // Slow path: persisted cache table.
    let row = event_auth_chains::table
        .find(cache_key)
        .select(event_auth_chains::chain_sns)
        .first::<Vec<Option<Seqnum>>>(&mut connect()?)
        .optional()?;
    let Some(raw_sns) = row else {
        return Ok(None);
    };
    // The column is nullable per element; drop any NULL entries.
    let chain: Arc<Vec<Seqnum>> = Arc::new(raw_sns.into_iter().flatten().collect());
    AUTH_CHAIN_CACHE
        .lock()
        .unwrap()
        .insert(cache_key.to_owned(), Arc::clone(&chain));
    Ok(Some(chain))
}
/// Persists an auth chain for `cache_key` to the database and the
/// in-memory LRU cache.
///
/// The database upsert is best-effort (note the `.ok()`): a DB failure only
/// loses the persisted copy; the RAM cache below is still updated.
pub fn cache_auth_chain(cache_key: Vec<Seqnum>, chain_sns: &[Seqnum]) -> AppResult<()> {
    diesel::insert_into(event_auth_chains::table)
        .values((
            event_auth_chains::cache_key.eq(&cache_key),
            event_auth_chains::chain_sns.eq(chain_sns),
        ))
        .on_conflict(event_auth_chains::cache_key)
        .do_update()
        .set(event_auth_chains::chain_sns.eq(chain_sns))
        .execute(&mut connect()?)
        .ok();
    let chain_sns = chain_sns.to_vec();
    // Cache in RAM
    AUTH_CHAIN_CACHE
        .lock()
        .unwrap()
        .insert(cache_key, Arc::new(chain_sns));
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/space.rs | crates/server/src/room/space.rs | use std::collections::{BTreeSet, VecDeque};
use std::str::FromStr;
use std::sync::{LazyLock, Mutex, MutexGuard};
use lru_cache::LruCache;
use crate::core::client::space::SpaceHierarchyRoomsChunk;
use crate::core::events::room::join_rule::RoomJoinRulesEventContent;
use crate::core::events::space::child::HierarchySpaceChildEvent;
use crate::core::events::{StateEventType, space::child::SpaceChildEventContent};
use crate::core::federation::space::{
HierarchyReqArgs, HierarchyResBody, SpaceHierarchyChildSummary, SpaceHierarchyParentSummary,
hierarchy_request,
};
use crate::core::identifiers::*;
use crate::core::room::{JoinRule, RoomType};
use crate::core::serde::RawJson;
use crate::core::{self, OwnedRoomId, RoomId, UserId, space::SpaceRoomJoinRule};
use crate::event::handler;
use crate::room::state;
use crate::{AppResult, GetUrlOrigin, MatrixError};
mod pagination_token;
pub use pagination_token::PaginationToken;
use super::state::get_full_state;
/// Cache key is (room id, suggested_only); a `None` value caches a negative
/// lookup (no summary could be produced for that room).
type CacheItem = LruCache<(OwnedRoomId, bool), Option<CachedSpaceHierarchySummary>>;
/// In-memory LRU of space-hierarchy summaries, shared by local and
/// federation lookups.
pub static ROOM_ID_SPACE_CHUNK_CACHE: LazyLock<Mutex<CacheItem>> =
    LazyLock::new(|| Mutex::new(LruCache::new(100)));
/// Cached parent summary for one (room, suggested_only) pair.
pub struct CachedSpaceHierarchySummary {
    summary: SpaceHierarchyParentSummary,
}
/// Result of a summary lookup: the summary itself, or a marker that the
/// room exists but the requester is not allowed to see it.
#[derive(Clone, Debug)]
pub enum SummaryAccessibility {
    Accessible(SpaceHierarchyParentSummary),
    Inaccessible,
}
/// Identifier used to check if rooms are accessible: a local user (for
/// client requests) or a remote server (for federation requests).
#[derive(Debug)]
pub enum Identifier<'a> {
    UserId(&'a UserId),
    ServerName(&'a ServerName),
}
/// Gets the summary of a space using solely local information.
///
/// Consults the hierarchy cache first (including cached negative results);
/// on a miss it builds the summary from local room state and caches it.
/// Returns `Ok(None)` when no summary can be produced locally.
pub async fn get_summary_and_children_local(
    current_room: &RoomId,
    identifier: &Identifier<'_>,
    suggested_only: bool,
) -> AppResult<Option<SummaryAccessibility>> {
    match ROOM_ID_SPACE_CHUNK_CACHE
        .lock()
        .unwrap()
        .get_mut(&(current_room.to_owned(), suggested_only))
        .as_ref()
    {
        None => (), // cache miss
        Some(None) => return Ok(None),
        Some(Some(cached)) => {
            // Cached summary: re-check accessibility for this caller, since
            // the cache is shared between different requesters.
            let accessibility = if is_accessible_child(
                current_room,
                &cached.summary.join_rule,
                identifier,
                &cached.summary.allowed_room_ids,
            ) {
                SummaryAccessibility::Accessible(cached.summary.clone())
            } else {
                SummaryAccessibility::Inaccessible
            };
            return Ok(Some(accessibility));
        }
    }
    let children_pdus: Vec<_> = get_stripped_space_child_events(current_room, suggested_only)?;
    let Ok(summary) = get_room_summary(current_room, children_pdus, identifier).await else {
        return Ok(None);
    };
    ROOM_ID_SPACE_CHUNK_CACHE.lock().unwrap().insert(
        (current_room.to_owned(), suggested_only),
        Some(CachedSpaceHierarchySummary {
            summary: summary.clone(),
        }),
    );
    Ok(Some(SummaryAccessibility::Accessible(summary)))
}
/// Gets the summary of a space using solely federation.
///
/// Tries each server in `via` in order; the first parsable hierarchy
/// response wins and is cached (including its child summaries). Returns
/// `Ok(None)` when no server produced a usable response.
#[tracing::instrument(level = "debug")]
async fn get_summary_and_children_federation(
    current_room: &RoomId,
    suggested_only: bool,
    user_id: &UserId,
    via: &[OwnedServerName],
) -> AppResult<Option<SummaryAccessibility>> {
    let mut res_body = None;
    for server in via {
        let request = hierarchy_request(
            &server.origin().await,
            HierarchyReqArgs {
                room_id: current_room.to_owned(),
                suggested_only,
            },
        )?
        .into_inner();
        if let Ok(response) = crate::sending::send_federation_request(server, request, None).await
            && let Ok(body) = response.json::<HierarchyResBody>().await
        {
            ROOM_ID_SPACE_CHUNK_CACHE.lock().unwrap().insert(
                (current_room.to_owned(), suggested_only),
                Some(CachedSpaceHierarchySummary {
                    summary: body.room.clone(),
                }),
            );
            res_body = Some(body);
        }
        if res_body.is_some() {
            break;
        }
    }
    let Some(res_body) = res_body else {
        return Ok(None);
    };
    // Opportunistically cache the returned child summaries that are not
    // cached yet; the lock is re-taken per child and the guard is handed to
    // cache_insert.
    res_body
        .children
        .into_iter()
        .filter_map(|child| {
            if let Ok(mut cache) = ROOM_ID_SPACE_CHUNK_CACHE.lock() {
                if !cache.contains_key(&(current_room.to_owned(), suggested_only)) {
                    Some((child, cache))
                } else {
                    None
                }
            } else {
                None
            }
        })
        .for_each(|(child, cache)| cache_insert(cache, current_room, child, suggested_only));
    let summary = res_body.room;
    let identifier = Identifier::UserId(user_id);
    // The remote summary still has to pass our own accessibility check.
    let is_accessible_child = is_accessible_child(
        current_room,
        &summary.join_rule,
        &identifier,
        &summary.allowed_room_ids,
    );
    if is_accessible_child {
        return Ok(Some(SummaryAccessibility::Accessible(summary)));
    }
    Ok(Some(SummaryAccessibility::Inaccessible))
}
/// Returns the stripped `m.space.child` events of a room.
///
/// Children with an empty `via` list, children whose state key is not a
/// valid room id, and (when `suggested_only`) non-suggested children are
/// filtered out.
fn get_stripped_space_child_events(
    room_id: &RoomId,
    suggested_only: bool,
) -> AppResult<Vec<RawJson<HierarchySpaceChildEvent>>> {
    let frame_id = super::get_frame_id(room_id, None)?;
    let mut child_events = Vec::new();
    for ((event_type, state_key), pdu) in get_full_state(frame_id)? {
        if event_type != StateEventType::SpaceChild {
            continue;
        }
        let Ok(content) = pdu.get_content::<SpaceChildEventContent>() else {
            continue;
        };
        if content.via.is_empty() || (suggested_only && !content.suggested) {
            continue;
        }
        // The state key of an m.space.child event is the child room id.
        if RoomId::parse(&state_key).is_ok() {
            child_events.push(pdu.to_stripped_space_child_event());
        }
    }
    Ok(child_events)
}
/// Gets the summary of a space from local data, falling back to federation
/// when nothing usable is known locally.
pub async fn get_summary_and_children_client(
    current_room: &OwnedRoomId,
    suggested_only: bool,
    user_id: &UserId,
    via: &[OwnedServerName],
) -> AppResult<Option<SummaryAccessibility>> {
    let local = get_summary_and_children_local(
        current_room,
        &Identifier::UserId(user_id),
        suggested_only,
    )
    .await;
    match local {
        Ok(Some(summary)) => Ok(Some(summary)),
        // Local miss or error: ask the servers listed in `via`.
        _ => get_summary_and_children_federation(current_room, suggested_only, user_id, via).await,
    }
}
/// Builds a [`SpaceHierarchyParentSummary`] for `room_id` from local state,
/// or errors if `identifier` is not allowed to see the room.
async fn get_room_summary(
    room_id: &RoomId,
    children_state: Vec<RawJson<HierarchySpaceChildEvent>>,
    identifier: &Identifier<'_>,
) -> AppResult<SpaceHierarchyParentSummary> {
    // Missing or invalid join-rules state defaults to invite-only.
    let join_rule = super::get_state_content::<RoomJoinRulesEventContent>(
        room_id,
        &StateEventType::RoomJoinRules,
        "",
        None,
    )
    .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule);
    let allowed_room_ids = state::allowed_room_ids(join_rule.clone());
    let join_rule: SpaceRoomJoinRule = join_rule.clone().into();
    let is_accessible_child =
        is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids);
    if !is_accessible_child {
        return Err(MatrixError::forbidden("User is not allowed to see the room", None).into());
    }
    // All remaining fields are best-effort reads of current room state.
    let name = super::get_name(room_id).ok();
    let topic = super::get_topic(room_id).ok();
    let room_type = super::get_room_type(room_id).ok().flatten();
    let world_readable = super::is_world_readable(room_id);
    let guest_can_join = super::guest_can_join(room_id);
    let num_joined_members = super::joined_member_count(room_id).unwrap_or(0);
    let canonical_alias = super::get_canonical_alias(room_id).ok().flatten();
    let avatar_url = super::get_avatar_url(room_id).ok().flatten();
    let room_version = super::get_version(room_id).ok();
    let encryption = super::get_encryption(room_id).ok();
    Ok(SpaceHierarchyParentSummary {
        canonical_alias,
        name,
        topic,
        world_readable,
        guest_can_join,
        avatar_url,
        room_type,
        children_state,
        allowed_room_ids,
        join_rule,
        room_id: room_id.to_owned(),
        num_joined_members,
        room_version,
        encryption,
    })
}
/// Walks the space hierarchy below `args.room_id` (depth-first) and returns
/// up to `limit` room chunks for the client `/hierarchy` endpoint.
///
/// Pagination state (visited rooms, depth/suggested flags) is carried in an
/// opaque `PaginationToken`; `max_depth` and `suggested_only` must not
/// change between paginated calls.
pub async fn get_room_hierarchy(
    sender_id: &UserId,
    args: &core::client::space::HierarchyReqArgs,
) -> AppResult<core::client::space::HierarchyResBody> {
    type Entry = (OwnedRoomId, Vec<OwnedServerName>);
    type RoomDeque = VecDeque<Entry>;
    // Cap the page size at 50 regardless of what the client asked for.
    let limit = args.limit.unwrap_or(50).min(50);
    let max_depth = args.max_depth.unwrap_or(usize::MAX);
    let pagination_token = args
        .from
        .as_ref()
        .and_then(|s| PaginationToken::from_str(s).ok());
    // Should prevent unexpected behaviour in (bad) clients
    if let Some(token) = &pagination_token
        && (token.suggested_only != args.suggested_only || token.max_depth != max_depth)
    {
        return Err(MatrixError::invalid_param(
            "suggested_only and max_depth cannot change on paginated requests",
        )
        .into());
    }
    // Sequence numbers of rooms already returned on previous pages.
    let room_sns = pagination_token.map(|p| p.room_sns).unwrap_or_default();
    let room_id = &args.room_id;
    let suggested_only = args.suggested_only;
    let mut queue: RoomDeque =
        [(room_id.to_owned(), vec![crate::room::server_name(room_id)?])].into();
    let mut rooms = Vec::with_capacity(limit);
    let mut parents = BTreeSet::new();
    while let Some((current_room, via)) = queue.pop_front() {
        let summary = match crate::room::space::get_summary_and_children_client(
            &current_room,
            suggested_only,
            sender_id,
            &via,
        )
        .await
        {
            Ok(summary) => summary,
            Err(e) => {
                error!("failed to get space summary for {}: {}", current_room, e);
                None
            }
        };
        match (summary, &current_room == room_id) {
            (None | Some(SummaryAccessibility::Inaccessible), false) => {
                // Just ignore other unavailable rooms
            }
            (None, true) => {
                return Err(
                    MatrixError::forbidden("the requested room was not found", None).into(),
                );
            }
            (Some(SummaryAccessibility::Inaccessible), true) => {
                return Err(
                    MatrixError::forbidden("the requested room is inaccessible", None).into(),
                );
            }
            (Some(SummaryAccessibility::Accessible(summary)), _) => {
                // `populate` is false while we are still skipping rooms that
                // earlier pages already returned.
                let populate = parents.len() >= room_sns.len();
                let mut children = Vec::new();
                let room_type = crate::room::get_room_type(&current_room).ok();
                if room_type.is_none() || Some(Some(RoomType::Space)) == room_type {
                    children = get_parent_children_via(&summary, suggested_only)
                        .into_iter()
                        .filter(|(room, _)| !parents.contains(room))
                        .collect::<Vec<Entry>>();
                }
                if populate {
                    rooms.push(summary_to_chunk(summary.clone()));
                } else if queue.is_empty() && children.is_empty() {
                    break;
                }
                if rooms.len() >= limit {
                    break;
                }
                parents.insert(current_room.clone());
                if parents.len() > max_depth {
                    continue;
                }
                // Push children to the front (reversed) so traversal is
                // depth-first in document order.
                for child in children.into_iter().rev() {
                    queue.push_front(child);
                }
            }
        }
    }
    // If the walk stopped early, encode the position as the next token.
    let next_batch = if let Some((room, _)) = queue.pop_front() {
        parents.insert(room);
        let next_room_sns: Vec<_> = parents
            .iter()
            .filter_map(|room_id| crate::room::get_room_sn(room_id).ok())
            .collect();
        if !next_room_sns.is_empty() && next_room_sns.iter().ne(&room_sns) {
            Some(
                PaginationToken {
                    room_sns: next_room_sns,
                    limit,
                    max_depth,
                    suggested_only,
                }
                .to_string(),
            )
        } else {
            None
        }
    } else {
        None
    };
    Ok(core::client::space::HierarchyResBody { next_batch, rooms })
}
/// With the given identifier, checks if a room is accessible.
///
/// Members and invitees always pass; otherwise the decision follows the
/// room's join rule (public/knock rules are open, restricted rules require
/// membership in one of the allowed rooms, everything else is closed).
fn is_accessible_child(
    current_room: &RoomId,
    join_rule: &SpaceRoomJoinRule,
    identifier: &Identifier<'_>,
    allowed_room_ids: &[OwnedRoomId],
) -> bool {
    // Checks if ACLs allow for the server to participate
    if let Identifier::ServerName(server_name) = identifier
        && handler::acl_check(server_name, current_room).is_err()
    {
        return false;
    }
    if let Identifier::UserId(user_id) = identifier
        && (crate::room::user::is_joined(user_id, current_room).unwrap_or(false)
            || crate::room::user::is_invited(user_id, current_room).unwrap_or(false))
    {
        return true;
    }
    match join_rule {
        SpaceRoomJoinRule::Public
        | SpaceRoomJoinRule::Knock
        | SpaceRoomJoinRule::KnockRestricted => true,
        SpaceRoomJoinRule::Restricted => allowed_room_ids.iter().any(|room| match identifier {
            Identifier::UserId(user) => crate::room::user::is_joined(user, room).unwrap_or(false),
            Identifier::ServerName(server) => {
                crate::room::is_server_joined(server, room).unwrap_or(false)
            }
        }),
        // Invite only, Private, or Custom join rule
        _ => false,
    }
}
/// Returns the child rooms (and the servers to join them via) listed in a
/// space summary's stripped `m.space.child` state, honoring `suggested_only`.
pub fn get_parent_children_via(
    parent: &SpaceHierarchyParentSummary,
    suggested_only: bool,
) -> Vec<(OwnedRoomId, Vec<OwnedServerName>)> {
    let mut children = Vec::new();
    for state in &parent.children_state {
        // Skip children whose stripped event fails to deserialize.
        let Ok(child_event) = RawJson::deserialize(state) else {
            continue;
        };
        if suggested_only && !child_event.content.suggested {
            continue;
        }
        children.push((child_event.state_key, child_event.content.via));
    }
    children
}
/// Converts a federation child summary into a parent summary (computing its
/// stripped children from local state, best-effort) and stores it in the
/// hierarchy cache under `(current_room, suggested_only)`.
///
/// Takes the already-held cache guard so callers can batch insertions.
fn cache_insert(
    mut cache: MutexGuard<'_, CacheItem>,
    current_room: &RoomId,
    child: SpaceHierarchyChildSummary,
    suggested_only: bool,
) {
    let SpaceHierarchyChildSummary {
        canonical_alias,
        name,
        num_joined_members,
        room_id,
        topic,
        world_readable,
        guest_can_join,
        avatar_url,
        join_rule,
        room_type,
        allowed_room_ids,
        room_version,
        encryption,
    } = child;
    let summary = SpaceHierarchyParentSummary {
        canonical_alias,
        name,
        num_joined_members,
        topic,
        world_readable,
        guest_can_join,
        avatar_url,
        join_rule,
        room_type,
        allowed_room_ids,
        room_id: room_id.clone(),
        // Children are filled from local state; missing state yields an
        // empty child list rather than an error.
        children_state: get_stripped_space_child_events(&room_id, suggested_only)
            .unwrap_or_default(),
        room_version,
        encryption,
    };
    cache.insert(
        (current_room.to_owned(), suggested_only),
        Some(CachedSpaceHierarchySummary { summary }),
    );
}
// Here because cannot implement `From` across palpo-federation-api and
// palpo-client-api types
impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
fn from(value: CachedSpaceHierarchySummary) -> Self {
let SpaceHierarchyParentSummary {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
encryption,
room_version,
allowed_room_ids,
} = value.summary;
Self {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
encryption,
room_version,
allowed_room_ids,
}
}
}
/// Here because cannot implement `From` across palpo-federation-api and
/// palpo-client-api types
///
/// Pure field-for-field mapping; every field is moved, not cloned.
#[must_use]
pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk {
    let SpaceHierarchyParentSummary {
        canonical_alias,
        name,
        num_joined_members,
        room_id,
        topic,
        world_readable,
        guest_can_join,
        avatar_url,
        join_rule,
        room_type,
        children_state,
        encryption,
        room_version,
        allowed_room_ids,
    } = summary;
    SpaceHierarchyRoomsChunk {
        canonical_alias,
        name,
        num_joined_members,
        room_id,
        topic,
        world_readable,
        guest_can_join,
        avatar_url,
        join_rule,
        room_type,
        children_state,
        encryption,
        room_version,
        allowed_room_ids,
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/current.rs | crates/server/src/room/current.rs | use diesel::prelude::*;
use crate::AppResult;
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::schema::*;
/// Cached per-room membership/state statistics row backed by the
/// `stats_room_currents` table.
#[derive(Insertable, Identifiable, Queryable, Debug, Clone)]
#[diesel(table_name = stats_room_currents, primary_key(room_id))]
pub struct RoomCurrent {
    pub room_id: OwnedRoomId,
    /// Number of state events in the room.
    pub state_events: i64,
    /// Membership counts broken down by membership state.
    pub joined_members: i64,
    pub invited_members: i64,
    pub left_members: i64,
    pub banned_members: i64,
    pub knocked_members: i64,
    /// Members of this room that belong to this server.
    pub local_users_in_room: i64,
    // NOTE(review): presumably the stream id of the last delta folded into
    // this row — confirm against the code that writes this table.
    pub completed_delta_stream_id: i64,
}
/// Fetches the cached statistics row for a room, if one exists.
#[tracing::instrument]
pub fn get_current(room_id: &RoomId) -> AppResult<Option<RoomCurrent>> {
    let row = stats_room_currents::table
        .filter(stats_room_currents::room_id.eq(room_id))
        .first::<RoomCurrent>(&mut connect()?)
        .optional()?;
    Ok(row)
}
/// Number of invited members recorded for the room, if statistics exist.
/// `user_id` is currently unused but kept for interface compatibility.
#[tracing::instrument]
pub fn invite_count(room_id: &RoomId, user_id: &UserId) -> AppResult<Option<u64>> {
    Ok(stats_room_currents::table
        .filter(stats_room_currents::room_id.eq(room_id))
        .select(stats_room_currents::invited_members)
        .first::<i64>(&mut connect()?)
        .optional()?
        .map(|n| n as u64))
}
#[tracing::instrument]
pub fn left_count(room_id: &RoomId, user_id: &UserId) -> AppResult<Option<u64>> {
    // `user_id` is not read here; kept for signature parity with
    // `invite_count`.
    let mut conn = connect()?;
    let left = stats_room_currents::table
        .filter(stats_room_currents::room_id.eq(room_id))
        .select(stats_room_currents::left_members)
        .first::<i64>(&mut conn)
        .optional()?;
    Ok(left.map(|members| members as u64))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/directory.rs | crates/server/src/room/directory.rs | use diesel::prelude::*;
use crate::AppResult;
use crate::core::RoomId;
use crate::core::room::Visibility;
use crate::data::connect;
use crate::data::schema::*;
#[tracing::instrument]
pub fn set_public(room_id: &RoomId, value: bool) -> AppResult<()> {
    // Flip the room's public-directory flag in place.
    let mut conn = connect()?;
    let target = rooms::table.find(room_id);
    diesel::update(target)
        .set(rooms::is_public.eq(value))
        .execute(&mut conn)?;
    Ok(())
}
#[tracing::instrument]
pub fn is_public(room_id: &RoomId) -> AppResult<bool> {
    // Errors (including a missing room row) propagate to the caller.
    let mut conn = connect()?;
    let public = rooms::table
        .find(room_id)
        .select(rooms::is_public)
        .first::<bool>(&mut conn)?;
    Ok(public)
}
#[tracing::instrument]
pub fn visibility(room_id: &RoomId) -> Visibility {
    // Unknown rooms and lookup failures are both treated as private.
    match is_public(room_id) {
        Ok(true) => Visibility::Public,
        _ => Visibility::Private,
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/thread.rs | crates/server/src/room/thread.rs | use diesel::prelude::*;
use serde_json::json;
use crate::core::client::room::IncludeThreads;
use crate::core::events::relation::BundledThread;
use crate::core::identifiers::*;
use crate::core::serde::CanonicalJsonValue;
use crate::data::connect;
use crate::data::room::DbThread;
use crate::data::schema::*;
use crate::room::timeline;
use crate::{AppResult, SnPduEvent};
/// Lists the threads of `room_id`, most recently active first, together with
/// a token for fetching the next page (if any).
///
/// When `from_token` is given, only thread roots whose `event_sn` is at or
/// before the token are returned. `_include` is currently ignored. Roots
/// whose PDU cannot be loaded are silently skipped.
pub fn get_threads(
    room_id: &RoomId,
    _include: &IncludeThreads,
    limit: i64,
    from_token: Option<i64>,
) -> AppResult<(Vec<(OwnedEventId, SnPduEvent)>, Option<i64>)> {
    // Build the query once; only the pagination filter varies.
    let mut query = threads::table
        .filter(threads::room_id.eq(room_id))
        .select((threads::event_id, threads::event_sn))
        .order_by(threads::last_sn.desc())
        .limit(limit)
        .into_boxed();
    if let Some(from_token) = from_token {
        query = query.filter(threads::event_sn.le(from_token));
    }
    let items = query.load::<(OwnedEventId, i64)>(&mut connect()?)?;

    // NOTE(review): the next token is derived from the root's `event_sn`
    // while ordering is on `last_sn`; confirm this matches the pagination
    // contract expected by callers.
    let next_token = items.last().map(|(_, sn)| *sn - 1);

    let mut events = Vec::with_capacity(items.len());
    for (event_id, _) in items {
        if let Ok(pdu) = timeline::get_pdu(&event_id) {
            events.push((event_id, pdu));
        }
    }
    Ok((events, next_token))
}
/// Registers `pdu` as the latest message of the thread rooted at `thread_id`.
///
/// Updates the bundled `m.thread` aggregation stored in the root event's
/// `unsigned` object (creating it on first reply), points the event at its
/// thread in `event_points`, and upserts the `threads` row tracking the
/// thread's latest event.
pub fn add_to_thread(thread_id: &EventId, pdu: &SnPduEvent) -> AppResult<()> {
    let (root_pdu, mut root_pdu_json) = timeline::get_pdu_and_data(thread_id)?;

    if let CanonicalJsonValue::Object(unsigned) = root_pdu_json
        .entry("unsigned".to_owned())
        .or_insert_with(|| CanonicalJsonValue::Object(Default::default()))
    {
        // Either bump the existing bundled thread or start a fresh one; the
        // serialize-and-insert step is shared between both cases.
        let relations = match unsigned
            .get("m.relations")
            .and_then(|r| r.as_object())
            .and_then(|r| r.get("m.thread"))
            .and_then(|relations| {
                serde_json::from_value::<BundledThread>(relations.clone().into()).ok()
            }) {
            Some(mut relations) => {
                // Thread already existed
                relations.count += 1;
                relations.latest_event = pdu.to_message_like_event();
                relations
            }
            None => BundledThread {
                latest_event: pdu.to_message_like_event(),
                count: 1,
                current_user_participated: true,
            },
        };
        let content = serde_json::to_value(relations).expect("to_value always works");
        unsigned.insert(
            "m.relations".to_owned(),
            json!({ "m.thread": content })
                .try_into()
                .expect("thread is valid json"),
        );
        timeline::replace_pdu(thread_id, &root_pdu_json)?;
    }

    // Mark the reply as belonging to this thread.
    diesel::update(event_points::table.find(&pdu.event_id))
        .set(event_points::thread_id.eq(thread_id))
        .execute(&mut connect()?)?;
    // Insert the thread row, or advance its "latest event" pointer.
    diesel::insert_into(threads::table)
        .values(DbThread {
            event_id: root_pdu.event_id.clone(),
            event_sn: root_pdu.event_sn,
            room_id: root_pdu.room_id.clone(),
            last_id: pdu.event_id.clone(),
            last_sn: pdu.event_sn,
        })
        .on_conflict(threads::event_id)
        .do_update()
        .set((
            threads::last_id.eq(&pdu.event_id),
            threads::last_sn.eq(pdu.event_sn),
        ))
        .execute(&mut connect()?)?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/state/field.rs | crates/server/src/room/state/field.rs | use diesel::prelude::*;
use crate::AppResult;
use crate::core::events::StateEventType;
use crate::data::connect;
use crate::data::schema::*;
/// Interned `(event type, state key)` pair from the `room_state_fields`
/// table; the `id` is used as the compact key in compressed state.
#[derive(Identifiable, Queryable, Debug, Clone)]
#[diesel(table_name = room_state_fields)]
pub struct DbRoomStateField {
    // Surrogate key referenced by compressed state entries.
    pub id: i64,
    pub event_ty: StateEventType,
    pub state_key: String,
}
/// Looks up the interned state field by its id; errors if it does not exist.
pub fn get_field(field_id: i64) -> AppResult<DbRoomStateField> {
    let mut conn = connect()?;
    let field = room_state_fields::table
        .find(field_id)
        .first::<DbRoomStateField>(&mut conn)?;
    Ok(field)
}
/// Looks up the id for an existing `(event_ty, state_key)` pair; errors if
/// the pair has not been interned yet.
pub fn get_field_id(event_ty: &StateEventType, state_key: &str) -> AppResult<i64> {
    let mut conn = connect()?;
    let id = room_state_fields::table
        .filter(room_state_fields::event_ty.eq(event_ty))
        .filter(room_state_fields::state_key.eq(state_key))
        .select(room_state_fields::id)
        .first::<i64>(&mut conn)?;
    Ok(id)
}
pub fn ensure_field_id(event_ty: &StateEventType, state_key: &str) -> AppResult<i64> {
let id = diesel::insert_into(room_state_fields::table)
.values((
room_state_fields::event_ty.eq(event_ty),
room_state_fields::state_key.eq(state_key),
))
.on_conflict_do_nothing()
.returning(room_state_fields::id)
.get_result::<i64>(&mut connect()?)
.optional()?;
if let Some(id) = id {
Ok(id)
} else {
room_state_fields::table
.filter(room_state_fields::event_ty.eq(event_ty))
.filter(room_state_fields::state_key.eq(state_key))
.select(room_state_fields::id)
.first::<i64>(&mut connect()?)
.map_err(Into::into)
}
}
pub fn ensure_field(event_ty: &StateEventType, state_key: &str) -> AppResult<DbRoomStateField> {
let id = diesel::insert_into(room_state_fields::table)
.values((
room_state_fields::event_ty.eq(event_ty),
room_state_fields::state_key.eq(state_key),
))
.on_conflict_do_nothing()
.returning(room_state_fields::id)
.get_result::<i64>(&mut connect()?)
.optional()?;
if let Some(id) = id {
room_state_fields::table
.find(id)
.first::<DbRoomStateField>(&mut connect()?)
.map_err(Into::into)
} else {
room_state_fields::table
.filter(room_state_fields::event_ty.eq(event_ty))
.filter(room_state_fields::state_key.eq(state_key))
.first::<DbRoomStateField>(&mut connect()?)
.map_err(Into::into)
}
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/state/diff.rs | crates/server/src/room/state/diff.rs | use std::collections::BTreeSet;
use std::mem::size_of;
use std::ops::Deref;
use std::sync::Arc;
use diesel::prelude::*;
use super::{DbRoomStateDelta, FrameInfo, room_state_deltas};
use crate::core::{OwnedEventId, RoomId, Seqnum};
use crate::data::connect;
use crate::{AppResult, utils};
/// A state layer expressed as a diff against an optional parent layer.
pub struct StateDiff {
    // Frame id of the parent layer; `None` means this layer is a full state.
    pub parent_id: Option<i64>,
    // Entries added relative to the parent.
    pub appended: Arc<CompressedState>,
    // Entries removed relative to the parent.
    pub disposed: Arc<CompressedState>,
}
/// A computed delta (added/removed compressed state) tagged with the frame it
/// belongs to.
#[derive(Clone, Default)]
pub struct DeltaInfo {
    pub frame_id: i64,
    pub appended: Arc<CompressedState>,
    pub disposed: Arc<CompressedState>,
}
/// Ordered set of compressed `(field_id, event_sn)` state entries.
pub type CompressedState = BTreeSet<CompressedEvent>;
/// One state entry packed into 16 bytes: big-endian `field_id` followed by
/// big-endian `event_sn` (see `CompressedEvent::new`).
#[derive(Eq, Ord, Hash, PartialEq, PartialOrd, Copy, Debug, Clone)]
pub struct CompressedEvent([u8; 2 * size_of::<i64>()]);
impl CompressedEvent {
    /// Packs `field_id` and `event_sn` into a fixed 16-byte big-endian key.
    pub fn new(field_id: i64, event_sn: Seqnum) -> Self {
        // Write both halves straight into the array — no intermediate Vec
        // allocation as before.
        let mut buf = [0u8; 2 * size_of::<i64>()];
        buf[..size_of::<i64>()].copy_from_slice(&field_id.to_be_bytes());
        buf[size_of::<i64>()..].copy_from_slice(&event_sn.to_be_bytes());
        Self(buf)
    }
    /// First half of the key: the interned state-field id.
    pub fn field_id(&self) -> i64 {
        utils::i64_from_bytes(&self.0[0..size_of::<i64>()]).expect("bytes have right length")
    }
    /// Second half of the key: the event sequence number.
    pub fn event_sn(&self) -> Seqnum {
        utils::i64_from_bytes(&self.0[size_of::<i64>()..]).expect("bytes have right length")
    }
    /// Returns state_key_id, event id
    pub fn split(&self) -> AppResult<(i64, OwnedEventId)> {
        Ok((
            utils::i64_from_bytes(&self[0..size_of::<i64>()]).expect("bytes have right length"),
            crate::event::get_event_id_by_sn(
                utils::i64_from_bytes(&self[size_of::<i64>()..]).expect("bytes have right length"),
            )?,
        ))
    }
    /// Raw 16-byte representation, as stored in the database.
    pub fn as_bytes(&self) -> &[u8] {
        &self.0
    }
}
impl Deref for CompressedEvent {
    type Target = [u8; 2 * size_of::<i64>()];
    // Deref to the raw byte array so slicing (`self[..]`) works directly.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
pub fn compress_events(
room_id: &RoomId,
events: impl Iterator<Item = (i64, Seqnum)>,
) -> AppResult<CompressedState> {
let mut compressed = BTreeSet::new();
for (field_id, event_sn) in events {
compressed.insert(compress_event(room_id, field_id, event_sn)?);
}
Ok(compressed)
}
/// Compresses a single `(field_id, event_sn)` pair.
///
/// `_room_id` is currently unused; it is kept so the signature lines up with
/// `compress_events` and room-specific compression could be added later.
pub fn compress_event(
    _room_id: &RoomId,
    field_id: i64,
    event_sn: Seqnum,
) -> AppResult<CompressedEvent> {
    Ok(CompressedEvent::new(field_id, event_sn))
}
/// Loads the raw state-delta row for `frame_id`.
///
/// NOTE(review): the name contains a typo ("detla" for "delta"); it is kept
/// as-is because callers elsewhere may depend on it.
pub fn get_detla(frame_id: i64) -> AppResult<DbRoomStateDelta> {
    room_state_deltas::table
        .find(frame_id)
        .first::<DbRoomStateDelta>(&mut connect()?)
        .map_err(Into::into)
}
pub fn load_state_diff(frame_id: i64) -> AppResult<StateDiff> {
let DbRoomStateDelta {
parent_id,
appended,
disposed,
..
} = room_state_deltas::table
.find(frame_id)
.first::<DbRoomStateDelta>(&mut connect()?)?;
Ok(StateDiff {
parent_id,
appended: Arc::new(
appended
.chunks_exact(size_of::<CompressedEvent>())
.map(|chunk| CompressedEvent(chunk.try_into().expect("we checked the size above")))
.collect(),
),
disposed: Arc::new(
disposed
.chunks_exact(size_of::<CompressedEvent>())
.map(|chunk| CompressedEvent(chunk.try_into().expect("we checked the size above")))
.collect(),
),
})
}
pub fn save_state_delta(room_id: &RoomId, frame_id: i64, diff: StateDiff) -> AppResult<()> {
let StateDiff {
parent_id,
appended,
disposed,
} = diff;
diesel::insert_into(room_state_deltas::table)
.values(DbRoomStateDelta {
frame_id,
room_id: room_id.to_owned(),
parent_id,
appended: appended
.iter()
.flat_map(|event| event.as_bytes())
.cloned()
.collect::<Vec<_>>(),
disposed: disposed
.iter()
.flat_map(|event| event.as_bytes())
.cloned()
.collect::<Vec<_>>(),
})
.on_conflict_do_nothing()
.execute(&mut connect()?)?;
Ok(())
}
/// Creates a new state_hash that often is just a diff to an already existing
/// state_hash and therefore very efficient.
///
/// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer
/// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0
/// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's
/// based on layer n-2. If that layer is also too big, it will recursively fix above layers too.
///
/// * `point_id` - Shortstate_hash of this state
/// * `appended` - Added to base. Each vec is state_key_id+shorteventid
/// * `disposed` - Removed from base. Each vec is state_key_id+shorteventid
/// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
/// * `parent_states` - A stack with info on state_hash, full state, added diff and removed diff for each parent layer
#[tracing::instrument(skip(appended, disposed, diff_to_sibling, parent_states))]
pub fn calc_and_save_state_delta(
    room_id: &RoomId,
    frame_id: i64,
    appended: Arc<CompressedState>,
    disposed: Arc<CompressedState>,
    diff_to_sibling: usize,
    mut parent_states: Vec<FrameInfo>,
) -> AppResult<()> {
    // Folds this layer's diff into `parent`, producing the appended/disposed
    // sets of a combined layer relative to the parent's own base. Previously
    // this logic was duplicated verbatim in both recursion branches.
    fn fold_into_parent(
        parent: &FrameInfo,
        appended: &CompressedState,
        disposed: &CompressedState,
    ) -> (CompressedState, CompressedState) {
        let mut parent_appended = (*parent.appended).clone();
        let mut parent_disposed = (*parent.disposed).clone();
        for item in disposed.iter() {
            if !parent_appended.remove(item) {
                // It was not added in the parent and we removed it
                parent_disposed.insert(*item);
            }
            // Else it was added in the parent and we removed it again. We can forget this change
        }
        for item in appended.iter() {
            if !parent_disposed.remove(item) {
                // It was not touched in the parent and we added it
                parent_appended.insert(*item);
            }
            // Else it was removed in the parent and we added it again. We can forget this change
        }
        (parent_appended, parent_disposed)
    }

    let diff_sum = appended.len() + disposed.len();
    if parent_states.len() > 3 {
        // Number of layers
        // Too many layers: merge into the topmost parent and recurse deeper.
        let parent = parent_states.pop().unwrap();
        let (parent_appended, parent_disposed) = fold_into_parent(&parent, &appended, &disposed);
        return calc_and_save_state_delta(
            room_id,
            frame_id,
            Arc::new(parent_appended),
            Arc::new(parent_disposed),
            diff_sum,
            parent_states,
        );
    }
    if parent_states.is_empty() {
        // There is no parent layer; store this layer as a full state (layer 0).
        return save_state_delta(
            room_id,
            frame_id,
            StateDiff {
                parent_id: None,
                appended,
                disposed,
            },
        );
    }
    // Else we have two options.
    // 1. We add the current diff on top of the parent layer.
    // 2. We replace a layer above
    let parent = parent_states.pop().unwrap();
    let parent_diff = parent.appended.len() + parent.disposed.len();
    if diff_sum * diff_sum >= 2 * diff_to_sibling * parent_diff {
        // Diff too big, we replace above layer(s)
        let (parent_appended, parent_disposed) = fold_into_parent(&parent, &appended, &disposed);
        calc_and_save_state_delta(
            room_id,
            frame_id,
            Arc::new(parent_appended),
            Arc::new(parent_disposed),
            diff_sum,
            parent_states,
        )
    } else {
        // Diff small enough, we add diff as layer on top of parent
        save_state_delta(
            room_id,
            frame_id,
            StateDiff {
                parent_id: Some(parent.frame_id),
                appended,
                disposed,
            },
        )
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/state/frame.rs | crates/server/src/room/state/frame.rs | use std::sync::{Arc, LazyLock, Mutex};
use diesel::prelude::*;
use lru_cache::LruCache;
use super::{CompressedState, StateDiff};
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::schema::*;
use crate::{AppResult, MatrixError};
/// LRU cache of resolved frame-info chains, keyed by frame id (see
/// `load_frame_info`). Capacity 100k entries.
pub static STATE_INFO_CACHE: LazyLock<Mutex<LruCache<i64, Vec<FrameInfo>>>> =
    LazyLock::new(|| Mutex::new(LruCache::new(100_000)));
/// One resolved layer in a state-frame chain: the materialized full state at
/// this frame plus the diff that produced it from the layer below.
#[derive(Clone, Default)]
pub struct FrameInfo {
    pub frame_id: i64,
    pub full_state: Arc<CompressedState>,
    pub appended: Arc<CompressedState>,
    pub disposed: Arc<CompressedState>,
}
/// Returns a stack with info on state_hash, full state, added diff and removed diff for the selected state_hash and each parent layer.
///
/// Recurses through the frame's parent chain, materializing the full state at
/// each layer; results are memoized per frame in `STATE_INFO_CACHE`.
pub fn load_frame_info(frame_id: i64) -> AppResult<Vec<FrameInfo>> {
    // Fast path: the whole chain for this frame was resolved before.
    // (`unwrap` panics only if another thread panicked while holding the lock.)
    if let Some(r) = STATE_INFO_CACHE.lock().unwrap().get_mut(&frame_id) {
        return Ok(r.clone());
    }
    let StateDiff {
        parent_id,
        appended,
        disposed,
    } = super::load_state_diff(frame_id)?;
    if let Some(parent_id) = parent_id {
        // Resolve the parent chain first, then apply this layer's diff on top
        // of the parent's full state.
        let mut info = load_frame_info(parent_id)?;
        let mut full_state = (*info.last().expect("at least one frame").full_state).clone();
        full_state.extend(appended.iter().copied());
        let disposed = (*disposed).clone();
        for r in &disposed {
            full_state.remove(r);
        }
        info.push(FrameInfo {
            frame_id,
            full_state: Arc::new(full_state),
            appended,
            disposed: Arc::new(disposed),
        });
        STATE_INFO_CACHE
            .lock()
            .unwrap()
            .insert(frame_id, info.clone());
        Ok(info)
    } else {
        // Root layer: the appended set *is* the full state.
        let info = vec![FrameInfo {
            frame_id,
            full_state: appended.clone(),
            appended,
            disposed,
        }];
        STATE_INFO_CACHE
            .lock()
            .unwrap()
            .insert(frame_id, info.clone());
        Ok(info)
    }
}
/// Resolves the state frame id for `room_id`.
///
/// With `until_sn`, the latest non-null frame at or before that sequence
/// number is returned; otherwise the room's current frame is used.
///
/// # Errors
/// Returns a not-found error when no frame exists.
pub fn get_room_frame_id(room_id: &RoomId, until_sn: Option<i64>) -> AppResult<i64> {
    let frame_id = if let Some(until_sn) = until_sn {
        event_points::table
            .filter(event_points::room_id.eq(room_id))
            .filter(event_points::event_sn.le(until_sn))
            .filter(event_points::frame_id.is_not_null())
            .select(event_points::frame_id)
            .order(event_points::event_sn.desc())
            .first::<Option<i64>>(&mut connect()?)?
    } else {
        rooms::table
            .find(room_id)
            .select(rooms::state_frame_id)
            .first::<Option<i64>>(&mut connect()?)?
    };
    // `ok_or_else` avoids constructing the error on the success path.
    frame_id.ok_or_else(|| MatrixError::not_found("room frame is not found").into())
}
/// Returns the state frame id recorded for the given event.
///
/// # Errors
/// Returns a not-found error when the event has no frame assigned.
pub fn get_pdu_frame_id(event_id: &EventId) -> AppResult<i64> {
    let frame_id = event_points::table
        .filter(event_points::event_id.eq(event_id))
        .select(event_points::frame_id)
        .first::<Option<i64>>(&mut connect()?)?;
    // `ok_or_else` avoids constructing the error on the success path.
    frame_id.ok_or_else(|| MatrixError::not_found("pdu frame is not found").into())
}
/// Returns (state_hash, already_existed)
pub fn ensure_frame(room_id: &RoomId, hash_data: Vec<u8>) -> AppResult<i64> {
diesel::insert_into(room_state_frames::table)
.values((
room_state_frames::room_id.eq(room_id),
room_state_frames::hash_data.eq(hash_data),
))
.on_conflict_do_nothing()
.returning(room_state_frames::id)
.get_result(&mut connect()?)
.map_err(Into::into)
}
/// Looks up the id of an existing state frame by room and state hash.
pub fn get_frame_id(room_id: &RoomId, hash_data: &[u8]) -> AppResult<i64> {
    let mut conn = connect()?;
    let id = room_state_frames::table
        .filter(room_state_frames::room_id.eq(room_id))
        .filter(room_state_frames::hash_data.eq(hash_data))
        .select(room_state_frames::id)
        .get_result::<i64>(&mut conn)?;
    Ok(id)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/state/graph.rs | crates/server/src/room/state/graph.rs | use std::borrow::Borrow;
use std::cmp::{Ordering, Reverse};
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use crate::AppResult;
use crate::core::{EventId, UnixMillis};
/// Sorts the event graph based on number of outgoing/incoming edges.
///
/// `key_fn` is used to obtain the power level and age of an event for
/// breaking ties (together with the event ID).
///
/// # Errors
/// Propagates the first error returned by `key_fn`.
#[tracing::instrument(level = "debug", skip_all)]
pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher>(
    graph: &HashMap<Id, HashSet<Id, Hasher>>,
    key_fn: &F,
) -> AppResult<Vec<Id>>
where
    F: Fn(Id) -> Fut + Sync,
    Fut: Future<Output = AppResult<(usize, UnixMillis)>> + Send,
    Id: Borrow<EventId> + Clone + Eq + Hash + Ord + Send + Sync,
    Hasher: BuildHasher + Default + Clone + Send + Sync,
{
    // Ordering used by the heap to break ties between ready nodes.
    #[derive(PartialEq, Eq)]
    struct TieBreaker<'a, Id> {
        power_level: usize,
        origin_server_ts: UnixMillis,
        event_id: &'a Id,
    }
    impl<Id> Ord for TieBreaker<'_, Id>
    where
        Id: Ord,
    {
        fn cmp(&self, other: &Self) -> Ordering {
            // NOTE: the power level comparison is "backwards" intentionally.
            // See the "Mainline ordering" section of the Matrix specification
            // around where it says the following:
            //
            // > for events `x` and `y`, `x < y` if [...]
            //
            // <https://spec.matrix.org/v1.12/rooms/v11/#definitions>
            other
                .power_level
                .cmp(&self.power_level)
                .then(self.origin_server_ts.cmp(&other.origin_server_ts))
                .then(self.event_id.cmp(other.event_id))
        }
    }
    impl<Id> PartialOrd for TieBreaker<'_, Id>
    where
        Id: Ord,
    {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }
    debug!("starting lexicographical topological sort");
    // NOTE: an event that has no incoming edges happened most recently,
    // and an event that has no outgoing edges happened least recently.
    // NOTE: this is basically Kahn's algorithm except we look at nodes with no
    // outgoing edges, c.f.
    // https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
    // outdegree_map is an event referring to the events before it, the
    // more outdegree's the more recent the event.
    let mut outdegree_map = graph.clone();
    // The number of events that depend on the given event (the EventId key)
    // How many events reference this event in the DAG as a parent
    let mut reverse_graph: HashMap<_, HashSet<_, Hasher>> = HashMap::new();
    // Vec of nodes that have zero out degree, least recent events.
    let mut zero_outdegree = Vec::new();
    for (node, edges) in graph {
        if edges.is_empty() {
            let (power_level, origin_server_ts) = key_fn(node.clone()).await?;
            // The `Reverse` is because rusts `BinaryHeap` sorts largest -> smallest we need
            // smallest -> largest
            zero_outdegree.push(Reverse(TieBreaker {
                power_level,
                origin_server_ts,
                event_id: node,
            }));
        }
        reverse_graph.entry(node).or_default();
        for edge in edges {
            reverse_graph.entry(edge).or_default().insert(node);
        }
    }
    let mut heap = BinaryHeap::from(zero_outdegree);
    // We remove the oldest node (most incoming edges) and check against all other
    let mut sorted = vec![];
    // Destructure the `Reverse` and take the smallest `node` each time
    while let Some(Reverse(item)) = heap.pop() {
        let node = item.event_id;
        for &parent in reverse_graph
            .get(node)
            .expect("EventId in heap is also in reverse_graph")
        {
            // The number of outgoing edges this node has
            let out = outdegree_map
                .get_mut(parent.borrow())
                .expect("outdegree_map knows of all referenced EventIds");
            // Only push on the heap once older events have been cleared
            out.remove(node.borrow());
            if out.is_empty() {
                let (power_level, origin_server_ts) = key_fn(parent.clone()).await?;
                heap.push(Reverse(TieBreaker {
                    power_level,
                    origin_server_ts,
                    event_id: parent,
                }));
            }
        }
        // synapse yields we push then return the vec
        sorted.push(node.clone());
    }
    Ok(sorted)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/alias/remote.rs | crates/server/src/room/alias/remote.rs | use crate::core::federation::query::RoomInfoResBody;
use crate::core::federation::query::directory_request;
use crate::core::identifiers::*;
use crate::{AppResult, GetUrlOrigin, MatrixError};
pub(super) async fn remote_resolve(
room_alias: &RoomAliasId,
servers: Vec<OwnedServerName>,
) -> AppResult<(OwnedRoomId, Vec<OwnedServerName>)> {
debug!(?room_alias, servers = ?servers, "remote resolve");
let servers = [vec![room_alias.server_name().to_owned()], servers].concat();
let mut resolved_servers = Vec::new();
let mut resolved_room_id: Option<OwnedRoomId> = None;
for server in servers {
match remote_request(room_alias, &server).await {
Err(e) => tracing::error!("Failed to query for {room_alias:?} from {server}: {e}"),
Ok(RoomInfoResBody { room_id, servers }) => {
debug!(
"Server {server} answered with {room_id:?} for {room_alias:?} servers: \
{servers:?}"
);
resolved_room_id.get_or_insert(room_id);
add_server(&mut resolved_servers, server);
if !servers.is_empty() {
add_servers(&mut resolved_servers, servers);
break;
}
}
}
}
resolved_room_id
.map(|room_id| (room_id, resolved_servers))
.ok_or_else(|| {
MatrixError::not_found("No servers could assist in resolving the room alias").into()
})
}
/// Sends a federation directory query for `room_alias` to `server` and
/// decodes the response body.
async fn remote_request(
    room_alias: &RoomAliasId,
    server: &ServerName,
) -> AppResult<RoomInfoResBody> {
    let origin = server.origin().await;
    let request = directory_request(&origin, room_alias)?.into_inner();
    let response = crate::sending::send_federation_request(server, request, None).await?;
    let body = response.json::<RoomInfoResBody>().await?;
    Ok(body)
}
/// Merges `new` into `servers`, skipping entries already present.
fn add_servers(servers: &mut Vec<OwnedServerName>, new: Vec<OwnedServerName>) {
    new.into_iter().for_each(|server| add_server(servers, server));
}
/// Appends `server` unless it is already present (order-preserving dedup).
fn add_server(servers: &mut Vec<OwnedServerName>, server: OwnedServerName) {
    let already_known = servers.iter().any(|known| *known == server);
    if !already_known {
        servers.push(server);
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/space/pagination_token.rs | crates/server/src/room/space/pagination_token.rs | use std::fmt::{Display, Formatter};
use std::str::FromStr;
use crate::core::{MatrixError, Seqnum};
// TODO: perhaps use some better form of token rather than just room count
/// Opaque pagination token for space-hierarchy responses, serialized as
/// `"<sn,sn,...>_<limit>_<max_depth>_<suggested_only>"` (see the
/// `FromStr`/`Display` impls below).
#[derive(Debug, Eq, PartialEq)]
pub struct PaginationToken {
    /// Path down the hierarchy of the room to start the response at,
    /// excluding the root space.
    pub room_sns: Vec<Seqnum>,
    // Request's page-size limit, carried through the token.
    pub limit: usize,
    // Request's maximum hierarchy depth, carried through the token.
    pub max_depth: usize,
    // Request's suggested-only flag, carried through the token.
    pub suggested_only: bool,
}
impl FromStr for PaginationToken {
    type Err = MatrixError;

    /// Parses `"<sn,sn,...>_<limit>_<max_depth>_<suggested_only>"`.
    ///
    /// Exactly four `_`-separated segments are required. Invalid sequence
    /// numbers in the first segment are silently skipped (matching the
    /// previous lenient behavior); `suggested_only` must be exactly
    /// `"true"` or `"false"`.
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        let mut values = value.split('_');
        let mut pag_tok = || {
            let room_sns = values
                .next()?
                .split(',')
                .filter_map(|room_s| i64::from_str(room_s).ok())
                .collect();
            let limit = usize::from_str(values.next()?).ok()?;
            let max_depth = usize::from_str(values.next()?).ok()?;
            let slice = values.next()?;
            // Reject trailing segments.
            if values.next().is_some() {
                return None;
            }
            // `bool::from_str` accepts only the literals "true"/"false",
            // identical to the explicit comparison it replaces.
            let suggested_only = bool::from_str(slice).ok()?;
            Some(Self {
                room_sns,
                limit,
                max_depth,
                suggested_only,
            })
        };
        pag_tok().ok_or_else(|| MatrixError::invalid_param("invalid token"))
    }
}
impl Display for PaginationToken {
    /// Writes `"<sn,sn,...>_<limit>_<max_depth>_<suggested_only>"` — the
    /// inverse of the `FromStr` implementation.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Stream the comma-separated sns directly instead of building an
        // intermediate Vec<String>.
        for (idx, sn) in self.room_sns.iter().enumerate() {
            if idx > 0 {
                write!(f, ",")?;
            }
            write!(f, "{sn}")?;
        }
        write!(
            f,
            "_{}_{}_{}",
            self.limit, self.max_depth, self.suggested_only
        )
    }
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/timeline/stream.rs | crates/server/src/room/timeline/stream.rs | use diesel::prelude::*;
use indexmap::IndexMap;
use crate::core::client::filter::{RoomEventFilter, UrlFilter};
use crate::core::identifiers::*;
use crate::core::{Direction, Seqnum};
use crate::data::connect;
use crate::data::schema::*;
use crate::event::BatchToken;
use crate::{AppResult, SnPduEvent, data, utils};
/// Loads all (non-outlier) PDUs of a room in forward order, keyed by their
/// sequence number, optionally bounded above by `until_tk` and filtered by
/// `user_id`'s visibility.
pub fn load_all_pdus(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    until_tk: Option<BatchToken>,
) -> AppResult<IndexMap<i64, SnPduEvent>> {
    load_pdus_forward(user_id, room_id, None, until_tk, None, usize::MAX)
}
/// Convenience wrapper around [`load_pdus`] with `Direction::Forward`.
pub fn load_pdus_forward(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    since_tk: Option<BatchToken>,
    until_tk: Option<BatchToken>,
    filter: Option<&RoomEventFilter>,
    limit: usize,
) -> AppResult<IndexMap<i64, SnPduEvent>> {
    load_pdus(
        user_id,
        room_id,
        since_tk,
        until_tk,
        limit,
        filter,
        Direction::Forward,
    )
}
/// Convenience wrapper around [`load_pdus`] with `Direction::Backward`.
pub fn load_pdus_backward(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    since_tk: Option<BatchToken>,
    until_tk: Option<BatchToken>,
    filter: Option<&RoomEventFilter>,
    limit: usize,
) -> AppResult<IndexMap<i64, SnPduEvent>> {
    load_pdus(
        user_id,
        room_id,
        since_tk,
        until_tk,
        limit,
        filter,
        Direction::Backward,
    )
}
/// Returns an iterator over all events and their tokens in a room that happened before the
/// event with id `until` in reverse-chronological order.
/// Skips events before user joined the room.
#[tracing::instrument]
pub fn load_pdus(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    since_tk: Option<BatchToken>,
    until_tk: Option<BatchToken>,
    limit: usize,
    filter: Option<&RoomEventFilter>,
    dir: Direction,
) -> AppResult<IndexMap<Seqnum, SnPduEvent>> {
    let mut list: IndexMap<Seqnum, SnPduEvent> = IndexMap::with_capacity(limit.clamp(10, 100));
    // Cursor over `events::sn`, advanced after each batch so the loop makes
    // progress even when rows are dropped by visibility checks below.
    let mut start_sn = if dir == Direction::Forward {
        0
    } else {
        data::curr_sn()? + 1
    };
    while list.len() < limit {
        let mut query = events::table
            .filter(events::room_id.eq(room_id))
            .into_boxed();
        // Token bounds are applied on `stream_ordering` in both directions.
        if dir == Direction::Forward {
            if let Some(since_tk) = since_tk {
                query = query.filter(events::stream_ordering.ge(since_tk.stream_ordering()));
            }
            if let Some(until_tk) = until_tk {
                query = query.filter(events::stream_ordering.lt(until_tk.stream_ordering()));
            }
        } else {
            if let Some(since_tk) = since_tk {
                query = query.filter(events::stream_ordering.lt(since_tk.stream_ordering()));
            }
            if let Some(until_tk) = until_tk {
                query = query.filter(events::stream_ordering.ge(until_tk.stream_ordering()));
            }
        }
        // Apply the client-supplied room event filter, when present.
        if let Some(filter) = filter {
            if let Some(url_filter) = &filter.url_filter {
                match url_filter {
                    UrlFilter::EventsWithUrl => query = query.filter(events::contains_url.eq(true)),
                    UrlFilter::EventsWithoutUrl => {
                        query = query.filter(events::contains_url.eq(false))
                    }
                }
            }
            if !filter.not_types.is_empty() {
                query = query.filter(events::ty.ne_all(&filter.not_types));
            }
            if !filter.not_rooms.is_empty() {
                query = query.filter(events::room_id.ne_all(&filter.not_rooms));
            }
            if let Some(rooms) = &filter.rooms
                && !rooms.is_empty()
            {
                query = query.filter(events::room_id.eq_any(rooms));
            }
            if let Some(senders) = &filter.senders
                && !senders.is_empty()
            {
                query = query.filter(events::sender_id.eq_any(senders));
            }
            if let Some(types) = &filter.types
                && !types.is_empty()
            {
                query = query.filter(events::ty.eq_any(types));
            }
        }
        // NOTE(review): the forward branch orders by `stream_ordering` DESC
        // and then reverses the batch, while the cursor advances on `sn`;
        // confirm the two orderings agree for this table.
        let events: Vec<(OwnedEventId, Seqnum)> = if dir == Direction::Forward {
            query
                .filter(events::sn.gt(start_sn))
                .filter(events::is_outlier.eq(false))
                .order(events::stream_ordering.desc())
                .limit(utils::usize_to_i64(limit))
                .select((events::id, events::sn))
                .load::<(OwnedEventId, Seqnum)>(&mut connect()?)?
                .into_iter()
                .rev()
                .collect()
        } else {
            query
                .filter(events::sn.lt(start_sn))
                .filter(events::is_outlier.eq(false))
                .order(events::sn.desc())
                .limit(utils::usize_to_i64(limit))
                .select((events::id, events::sn))
                .load::<(OwnedEventId, Seqnum)>(&mut connect()?)?
                .into_iter()
                .collect()
        };
        if events.is_empty() {
            break;
        }
        // Advance the cursor past this batch (max sn forward, min sn backward).
        start_sn = if dir == Direction::Forward {
            if let Some(sn) = events.iter().map(|(_, sn)| sn).max() {
                *sn
            } else {
                break;
            }
        } else if let Some(sn) = events.iter().map(|(_, sn)| sn).min() {
            *sn
        } else {
            break;
        };
        for (event_id, event_sn) in events {
            if let Ok(mut pdu) = super::get_pdu(&event_id) {
                // Per-user post-processing: visibility, txn-id scrubbing for
                // other users' events, and unsigned membership/age fields.
                if let Some(user_id) = user_id {
                    if !pdu.user_can_see(user_id)? {
                        continue;
                    }
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    pdu.add_unsigned_membership(user_id)?;
                }
                pdu.add_age()?;
                list.insert(event_sn, pdu);
                if list.len() >= limit {
                    break;
                }
            }
        }
    }
    Ok(list)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/timeline/topolo.rs | crates/server/src/room/timeline/topolo.rs | use diesel::prelude::*;
use indexmap::IndexMap;
use palpo_data::print_query;
use palpo_data::room::DbEvent;
use crate::core::client::filter::{RoomEventFilter, UrlFilter};
use crate::core::identifiers::*;
use crate::core::{Direction, Seqnum};
use crate::data::connect;
use crate::data::schema::*;
use crate::event::BatchToken;
use crate::{AppResult, SnPduEvent, utils};
/// Convenience wrapper around `load_pdus` (topological ordering) with
/// `Direction::Forward`.
pub fn load_pdus_forward(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    since_tk: Option<BatchToken>,
    until_tk: Option<BatchToken>,
    filter: Option<&RoomEventFilter>,
    limit: usize,
) -> AppResult<IndexMap<Seqnum, SnPduEvent>> {
    load_pdus(
        user_id,
        room_id,
        since_tk,
        until_tk,
        limit,
        filter,
        Direction::Forward,
    )
}
/// Convenience wrapper around `load_pdus` (topological ordering) with
/// `Direction::Backward`.
pub fn load_pdus_backward(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    since_tk: Option<BatchToken>,
    until_tk: Option<BatchToken>,
    filter: Option<&RoomEventFilter>,
    limit: usize,
) -> AppResult<IndexMap<Seqnum, SnPduEvent>> {
    load_pdus(
        user_id,
        room_id,
        since_tk,
        until_tk,
        limit,
        filter,
        Direction::Backward,
    )
}
/// Loads up to `limit` events of a room between the optional `since_tk` and
/// `until_tk` batch tokens, walking in direction `dir`.
///
/// When `user_id` is given, events that user cannot see are skipped, and
/// transaction ids are stripped from events they did not send; membership
/// info and the event age are attached to each returned PDU. The result maps
/// event seqnum to its PDU.
///
/// NOTE(review): the `Forward` branch loads each page in ascending order and
/// then reverses it (`.rev()`), so within a page entries are inserted
/// newest-first just like the `Backward` branch — confirm callers expect
/// this insertion order.
#[tracing::instrument]
pub fn load_pdus(
    user_id: Option<&UserId>,
    room_id: &RoomId,
    since_tk: Option<BatchToken>,
    until_tk: Option<BatchToken>,
    limit: usize,
    filter: Option<&RoomEventFilter>,
    dir: Direction,
) -> AppResult<IndexMap<Seqnum, SnPduEvent>> {
    let mut list: IndexMap<Seqnum, SnPduEvent> = IndexMap::with_capacity(limit.clamp(10, 100));
    // DB-side pagination offset, advanced by the raw (pre-visibility-check)
    // row count of each fetched page.
    let mut offset = 0;
    // Fetch pages until enough visible events are collected or the query
    // window is exhausted.
    while list.len() < limit {
        let mut query = events::table
            .filter(events::room_id.eq(room_id))
            .into_boxed();
        if dir == Direction::Forward {
            // Forward: `since` is the lower bound, `until` the upper bound.
            // `Live` tokens compare on stream ordering only; `Historic`
            // tokens compare (topological, stream) lexicographically.
            if let Some(since_tk) = since_tk {
                match since_tk {
                    BatchToken::Live { stream_ordering } => {
                        query = query.filter(events::stream_ordering.ge(stream_ordering));
                    }
                    BatchToken::Historic {
                        topological_ordering,
                        stream_ordering,
                    } => {
                        query = query.filter(
                            events::topological_ordering.gt(topological_ordering).or(
                                events::topological_ordering
                                    .eq(topological_ordering)
                                    .and(events::stream_ordering.ge(stream_ordering)),
                            ),
                        );
                    }
                }
            }
            if let Some(until_tk) = until_tk {
                match until_tk {
                    BatchToken::Live { stream_ordering } => {
                        query = query.filter(events::stream_ordering.le(stream_ordering));
                    }
                    BatchToken::Historic {
                        topological_ordering,
                        stream_ordering,
                    } => {
                        query = query.filter(
                            events::topological_ordering.lt(topological_ordering).or(
                                events::topological_ordering
                                    .eq(topological_ordering)
                                    .and(events::stream_ordering.le(stream_ordering)),
                            ),
                        );
                    }
                }
            }
        } else {
            // Backward: bounds are mirrored — `since` becomes the upper
            // bound, `until` the lower bound.
            if let Some(since_tk) = since_tk {
                match since_tk {
                    BatchToken::Live { stream_ordering } => {
                        query = query.filter(events::stream_ordering.le(stream_ordering));
                    }
                    BatchToken::Historic {
                        topological_ordering,
                        stream_ordering,
                    } => {
                        query = query.filter(
                            events::topological_ordering.lt(topological_ordering).or(
                                events::topological_ordering
                                    .eq(topological_ordering)
                                    .and(events::stream_ordering.le(stream_ordering)),
                            ),
                        );
                    }
                }
            }
            if let Some(until_tk) = until_tk {
                // Uses the accessor methods instead of matching; a `Live`
                // token yields no topological ordering here.
                if let Some(topological_ordering) = until_tk.topological_ordering() {
                    query = query.filter(
                        events::topological_ordering.gt(topological_ordering).or(
                            events::topological_ordering
                                .eq(topological_ordering)
                                .and(events::stream_ordering.ge(until_tk.stream_ordering())),
                        ),
                    );
                } else {
                    query = query.filter(events::stream_ordering.ge(until_tk.stream_ordering()));
                }
            }
        }
        // Apply the client-supplied room event filter, when present.
        if let Some(filter) = filter {
            if let Some(url_filter) = &filter.url_filter {
                match url_filter {
                    UrlFilter::EventsWithUrl => query = query.filter(events::contains_url.eq(true)),
                    UrlFilter::EventsWithoutUrl => {
                        query = query.filter(events::contains_url.eq(false))
                    }
                }
            }
            if !filter.not_types.is_empty() {
                query = query.filter(events::ty.ne_all(&filter.not_types));
            }
            if !filter.not_rooms.is_empty() {
                query = query.filter(events::room_id.ne_all(&filter.not_rooms));
            }
            if let Some(rooms) = &filter.rooms
                && !rooms.is_empty()
            {
                query = query.filter(events::room_id.eq_any(rooms));
            }
            if let Some(senders) = &filter.senders
                && !senders.is_empty()
            {
                query = query.filter(events::sender_id.eq_any(senders));
            }
            if let Some(types) = &filter.types
                && !types.is_empty()
            {
                query = query.filter(events::ty.eq_any(types));
            }
        }
        let events: Vec<(OwnedEventId, Seqnum, i64)> = if dir == Direction::Forward {
            query
                .order((
                    events::topological_ordering.asc(),
                    events::origin_server_ts.asc(),
                    events::stream_ordering.asc(),
                ))
                .offset(offset)
                .limit(utils::usize_to_i64(limit))
                .select((events::id, events::sn, events::stream_ordering))
                .load::<(OwnedEventId, Seqnum, i64)>(&mut connect()?)?
                .into_iter()
                .rev()
                .collect()
        } else {
            query = query
                .order((
                    events::topological_ordering.desc(),
                    events::origin_server_ts.desc(),
                    events::stream_ordering.desc(),
                ))
                .offset(offset)
                .limit(utils::usize_to_i64(limit));
            query
                .select((events::id, events::sn, events::stream_ordering))
                .load::<(OwnedEventId, Seqnum, i64)>(&mut connect()?)?
                .into_iter()
                .collect()
        };
        if events.is_empty() {
            break;
        }
        let count = events.len();
        offset += count as i64;
        for (event_id, event_sn, _) in events {
            if let Ok(mut pdu) = super::get_pdu(&event_id) {
                if let Some(user_id) = user_id {
                    // Skip events this user must not see.
                    if !pdu.user_can_see(user_id)? {
                        continue;
                    }
                    // Transaction ids are private to the sending user.
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    pdu.add_unsigned_membership(user_id)?;
                }
                pdu.add_age()?;
                list.insert(event_sn, pdu);
                if list.len() >= limit {
                    break;
                }
            }
        }
        // A short page means the table is exhausted for this window.
        if count < limit {
            break;
        }
    }
    Ok(list)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/room/timeline/backfill.rs | crates/server/src/room/timeline/backfill.rs | use std::collections::BTreeMap;
use diesel::prelude::*;
use indexmap::IndexMap;
use itertools::Itertools;
use serde::Deserialize;
use crate::core::Seqnum;
use crate::core::events::TimelineEventType;
use crate::core::federation::backfill::{BackfillReqArgs, BackfillResBody, backfill_request};
use crate::core::identifiers::*;
use crate::core::serde::{CanonicalJsonObject, JsonValue, RawJsonValue};
use crate::data::connect;
use crate::data::schema::*;
use crate::event::BatchToken;
use crate::event::handler::process_to_timeline_pdu;
use crate::event::{handler, parse_fetched_pdu};
use crate::{AppError, AppResult, GetUrlOrigin, SnPduEvent, room};
/// Inspects the window of loaded PDUs for gaps in event depth and, when the
/// local history looks incomplete, backfills from other servers in the room.
///
/// Returns the PDUs that were successfully fetched and written to the
/// timeline; empty when no backfill was needed or no server could help.
#[tracing::instrument(skip_all)]
pub async fn backfill_if_required(
    room_id: &RoomId,
    from_tk: &BatchToken,
    pdus: &IndexMap<Seqnum, SnPduEvent>,
    limit: usize,
) -> AppResult<Vec<SnPduEvent>> {
    // Depths of the events we already have, plus the event the pagination
    // token points at (when it resolves to a locally-known event).
    let mut depths = pdus
        .values()
        .map(|p| (p.event_id.clone(), p.depth as i64))
        .collect::<Vec<_>>();
    if let Some(topological_ordering) = from_tk.topological_ordering() {
        if let Ok(event_id) = events::table
            .filter(events::room_id.eq(room_id))
            .filter(events::topological_ordering.eq(topological_ordering))
            .select(events::id)
            .first::<OwnedEventId>(&mut connect()?)
        {
            depths.push((event_id, topological_ordering.abs()));
        }
    }
    // Deepest first, so we scan from newest toward oldest.
    depths.sort_unstable_by(|a, b| b.1.cmp(&a.1));
    let (prev_event, prev_depth) = if let Some(depth) = depths.first() {
        depth
    } else {
        return Ok(vec![]);
    };
    let mut prev_depth = *prev_depth;
    let mut prev_event = prev_event;
    let last_depth = depths.last().map(|&(_, d)| d).unwrap_or_default() as i64;
    if prev_depth == last_depth {
        return Ok(vec![]);
    }
    // All locally-known events inside the depth window, deepest first.
    let depths = events::table
        .filter(events::depth.lt(prev_depth))
        .filter(events::depth.ge(last_depth))
        .order(events::depth.desc())
        .select((events::id, events::depth))
        .load::<(OwnedEventId, i64)>(&mut connect()?)?;
    let mut found_big_gap = false;
    let mut number_of_gaps = 0;
    // The event just above the first detected gap; backfill starts here.
    let mut fill_from = None;
    for &(ref event_id, depth) in depths.iter() {
        let delta = prev_depth - depth;
        if delta > 1 {
            number_of_gaps += 1;
            if fill_from.is_none() {
                fill_from = Some(prev_event);
            }
        }
        // NOTE(review): `delta >= 2` is implied by `delta > 1` above, so any
        // gap both counts and trips the big-gap early exit — confirm this
        // threshold is intended.
        if delta >= 2 {
            found_big_gap = true;
            if fill_from.is_none() {
                fill_from = Some(prev_event);
            }
            break;
        }
        prev_depth = depth;
        prev_event = event_id;
    }
    // Only backfill when the history is noticeably holey.
    if number_of_gaps < 3 && !found_big_gap {
        return Ok(vec![]);
    };
    let Some(fill_from) = fill_from else {
        return Ok(vec![]);
    };
    // Try the room's admin servers one by one until someone answers.
    let admin_servers = room::admin_servers(room_id, false)?;
    let room_version = room::get_version(room_id)?;
    for backfill_server in &admin_servers {
        info!("asking {backfill_server} for backfill");
        let request = backfill_request(
            &backfill_server.origin().await,
            BackfillReqArgs {
                room_id: room_id.to_owned(),
                v: vec![fill_from.to_owned()],
                limit: (limit * 2).max(50), // Over-fetch so filtering does not leave the page short / gappy.
            },
        )?
        .into_inner();
        match crate::sending::send_federation_request(backfill_server, request, None)
            .await?
            .json::<BackfillResBody>()
            .await
        {
            Ok(response) => {
                let mut events = Vec::new();
                // Sort the received PDUs by ascending depth before persisting.
                let pdus = response
                    .pdus
                    .into_iter()
                    .filter_map(|pdu| {
                        let val =
                            serde_json::from_str::<BTreeMap<String, JsonValue>>(pdu.get()).ok()?;
                        let depth = val.get("depth")?.as_i64()?;
                        Some((pdu, depth))
                    })
                    .sorted_by(|a, b| a.1.cmp(&b.1))
                    .map(|(pdu, _)| pdu)
                    .collect::<Vec<_>>();
                let mut saved_pdu_contents = Vec::new();
                for pdu in pdus {
                    match backfill_pdu(backfill_server, room_id, &room_version, pdu).await {
                        Ok(p) => {
                            saved_pdu_contents.push(p);
                        }
                        Err(e) => {
                            // Best-effort: a bad PDU does not abort the batch.
                            warn!("failed to add backfilled pdu: {e}");
                        }
                    }
                }
                for (pdu, content) in saved_pdu_contents {
                    let event_id = pdu.event_id.clone();
                    if let Err(e) =
                        process_to_timeline_pdu(pdu, content, Some(backfill_server)).await
                    {
                        error!("failed to process backfill pdu to timeline {}", e);
                    } else {
                        let pdu = super::get_pdu(&event_id)?;
                        events.push(pdu);
                        debug!("succeed to process backfill pdu to timeline {}", event_id);
                    }
                }
                return Ok(events);
            }
            Err(e) => {
                warn!("{backfill_server} could not provide backfill: {e}");
            }
        }
    }
    info!("no servers could backfill");
    Ok(vec![])
}
/// Parses and persists a single backfilled PDU received from `remote_server`.
///
/// If the event is already known locally, the stored PDU and its JSON are
/// returned unchanged. Otherwise the event runs through the outlier pipeline
/// and is saved, returning the saved PDU with its canonical JSON.
#[tracing::instrument(skip(pdu))]
pub async fn backfill_pdu(
    remote_server: &ServerName,
    room_id: &RoomId,
    room_version: &RoomVersionId,
    pdu: Box<RawJsonValue>,
) -> AppResult<(SnPduEvent, CanonicalJsonObject)> {
    let (event_id, value) = parse_fetched_pdu(room_id, room_version, &pdu)?;
    // Skip the PDU if we already have it as a timeline event.
    if let Ok(pdu) = super::get_pdu(&event_id) {
        info!("we already know {event_id}, skipping backfill");
        let value = super::get_pdu_json(&event_id)?
            .ok_or_else(|| AppError::public("event json not found"))?;
        return Ok((pdu, value));
    }
    let Some(outlier_pdu) =
        handler::process_to_outlier_pdu(remote_server, &event_id, room_id, room_version, value)
            .await?
    else {
        return Err(AppError::internal("failed to process backfilled pdu"));
    };
    let (pdu, value, _) = outlier_pdu.save_to_database(true)?;
    if pdu.event_ty == TimelineEventType::RoomMessage {
        #[derive(Deserialize)]
        struct ExtractBody {
            body: Option<String>,
        }
        // Only validates that the message content deserializes; the parsed
        // value itself is unused.
        let _content = pdu
            .get_content::<ExtractBody>()
            .map_err(|_| AppError::internal("invalid content in pdu."))?;
    }
    Ok((pdu, value))
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/key.rs | crates/server/src/user/key.rs | use std::collections::{BTreeMap, HashMap, hash_map};
use std::time::Instant;
use diesel::prelude::*;
use futures_util::stream::{FuturesUnordered, StreamExt};
use serde_json::json;
use crate::core::client::key::ClaimKeysResBody;
use crate::core::device::DeviceListUpdateContent;
use crate::core::encryption::{CrossSigningKey, DeviceKeys, OneTimeKey};
use crate::core::federation::key::{
QueryKeysReqBody, QueryKeysResBody, claim_keys_request, query_keys_request,
};
use crate::core::federation::transaction::{Edu, SigningKeyUpdateContent};
use crate::core::identifiers::*;
use crate::core::serde::JsonValue;
use crate::core::{DeviceKeyAlgorithm, UnixMillis, client, federation};
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::{
DbOneTimeKey, NewDbCrossSignature, NewDbCrossSigningKey, NewDbKeyChange, NewDbOneTimeKey,
};
use crate::exts::*;
use crate::user::clean_signatures;
use crate::{AppError, AppResult, BAD_QUERY_RATE_LIMITER, MatrixError, config, data, sending};
/// Gathers device keys and cross-signing keys for the requested users.
///
/// Local users are answered from the database; remote users are queried
/// over federation concurrently, with failing servers recorded in
/// `failures` and backed off via the bad-query rate limiter. An empty
/// device-id list for a user means "all devices of that user".
///
/// NOTE(review): `_include_display_names` is currently ignored — display
/// names are attached only in the all-devices branch.
pub async fn query_keys<F: Fn(&UserId) -> bool>(
    sender_id: Option<&UserId>,
    device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
    allowed_signatures: F,
    _include_display_names: bool,
) -> AppResult<client::key::KeysResBody> {
    let mut master_keys = BTreeMap::new();
    let mut self_signing_keys = BTreeMap::new();
    let mut user_signing_keys = BTreeMap::new();
    let mut device_keys = BTreeMap::new();

    let mut get_over_federation = HashMap::new();

    for (user_id, device_ids) in device_keys_input {
        // Defer remote users to the federation pass below.
        if user_id.server_name() != config::get().server_name {
            get_over_federation
                .entry(user_id.server_name())
                .or_insert_with(Vec::new)
                .push((user_id, device_ids));
            continue;
        }

        if device_ids.is_empty() {
            // No explicit devices requested: return keys for all of them.
            let mut container = BTreeMap::new();
            for device_id in data::user::all_device_ids(user_id)? {
                if let Some(mut keys) = data::user::get_device_keys_and_sigs(user_id, &device_id)? {
                    let device = data::user::device::get_device(user_id, &device_id)?;
                    if let Some(display_name) = &device.display_name {
                        keys.unsigned.device_display_name = display_name.to_owned().into();
                    }
                    container.insert(device_id, keys);
                }
            }
            device_keys.insert(user_id.to_owned(), container);
        } else {
            // BUGFIX: build ONE container for the user across all requested
            // devices. Previously the container was recreated and inserted
            // per device, so only the last requested device's keys survived.
            let mut container = BTreeMap::new();
            for device_id in device_ids {
                if let Some(keys) = data::user::get_device_keys_and_sigs(user_id, device_id)? {
                    container.insert(device_id.to_owned(), keys);
                }
            }
            device_keys.insert(user_id.to_owned(), container);
        }

        if let Some(master_key) =
            crate::user::get_allowed_master_key(sender_id, user_id, &allowed_signatures)?
        {
            master_keys.insert(user_id.to_owned(), master_key);
        }
        if let Some(self_signing_key) =
            crate::user::get_allowed_self_signing_key(sender_id, user_id, &allowed_signatures)?
        {
            self_signing_keys.insert(user_id.to_owned(), self_signing_key);
        }
        // The user-signing key is private: only returned to its owner.
        if Some(&**user_id) == sender_id
            && let Some(user_signing_key) = crate::user::get_user_signing_key(user_id)?
        {
            user_signing_keys.insert(user_id.to_owned(), user_signing_key);
        }
    }

    let mut failures = BTreeMap::new();

    // Record a strike against a misbehaving server for rate limiting.
    let back_off = |id| match BAD_QUERY_RATE_LIMITER.write().unwrap().entry(id) {
        hash_map::Entry::Vacant(e) => {
            e.insert((Instant::now(), 1));
        }
        hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
    };

    // Query all remote servers concurrently.
    let mut futures: FuturesUnordered<_> = get_over_federation
        .into_iter()
        .map(|(server, vec)| async move {
            let mut device_keys_input_fed = BTreeMap::new();
            for (user_id, keys) in vec {
                device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
            }
            let request = query_keys_request(
                &server.origin().await,
                QueryKeysReqBody {
                    device_keys: device_keys_input_fed,
                },
            )?
            .into_inner();
            let response_body = crate::sending::send_federation_request(server, request, None)
                .await?
                .json::<QueryKeysResBody>()
                .await
                .map_err(|_e| AppError::public("Query took too long"));
            Ok::<(&ServerName, AppResult<QueryKeysResBody>), AppError>((server, response_body))
        })
        .collect();

    // NOTE(review): `while let Some(Ok(..))` stops draining as soon as one
    // request future itself errors — confirm remaining servers should not
    // still be awaited in that case.
    while let Some(Ok((server, response))) = futures.next().await {
        match response {
            Ok(response) => {
                for (user_id, mut master_key) in response.master_keys {
                    // Merge in our locally-known signatures over the remote key.
                    if let Some(our_master_key) = crate::user::get_allowed_master_key(
                        sender_id,
                        &user_id,
                        &allowed_signatures,
                    )? {
                        master_key.signatures.extend(our_master_key.signatures);
                    }
                    let json = serde_json::to_value(master_key).expect("to_value always works");
                    let raw =
                        serde_json::from_value(json).expect("RawJson::from_value always works");
                    crate::user::add_cross_signing_keys(
                        &user_id, &raw, &None, &None,
                        false, // Don't notify: a notification would trigger another key request, looping forever.
                    )?;
                    master_keys.insert(user_id.to_owned(), raw);
                }

                self_signing_keys.extend(response.self_signing_keys);
                device_keys.extend(response.device_keys);
            }
            _ => {
                back_off(server.to_owned());
                failures.insert(server.to_string(), json!({}));
            }
        }
    }

    Ok(client::key::KeysResBody {
        master_keys,
        self_signing_keys,
        user_signing_keys,
        device_keys,
        failures,
    })
}
/// Claims one one-time key per requested `(user, device, algorithm)` triple.
///
/// Local claims delete the key from the database; remote users' claims are
/// forwarded to their servers concurrently. Servers that fail to answer are
/// reported in `failures` with an empty object.
pub async fn claim_one_time_keys(
    one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, DeviceKeyAlgorithm>>,
) -> AppResult<ClaimKeysResBody> {
    let mut one_time_keys = BTreeMap::new();
    let mut get_over_federation = BTreeMap::new();
    for (user_id, map) in one_time_keys_input {
        // Defer remote users to the federation pass below.
        if user_id.server_name().is_remote() {
            get_over_federation
                .entry(user_id.server_name())
                .or_insert_with(Vec::new)
                .push((user_id, map));
            continue;
        }
        let mut container = BTreeMap::new();
        for (device_id, key_algorithm) in map {
            // Claiming removes the key so it is never handed out twice.
            if let Some(one_time_keys) =
                crate::user::claim_one_time_key(user_id, device_id, key_algorithm)?
            {
                let mut c = BTreeMap::new();
                c.insert(one_time_keys.0, one_time_keys.1);
                container.insert(device_id.clone(), c);
            }
        }
        if !container.is_empty() {
            one_time_keys.insert(user_id.clone(), container);
        }
    }
    let mut failures = BTreeMap::new();
    // Fan out the remote claims concurrently.
    let mut futures: FuturesUnordered<_> = FuturesUnordered::new();
    for (server, vec) in get_over_federation.into_iter() {
        let mut one_time_keys_input_fed = BTreeMap::new();
        for (user_id, keys) in vec {
            one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
        }
        let request = claim_keys_request(
            &server.origin().await,
            federation::key::ClaimKeysReqBody {
                timeout: None,
                one_time_keys: one_time_keys_input_fed,
            },
        )?
        .into_inner();
        futures.push(async move {
            (
                server,
                crate::sending::send_federation_request(server, request, None).await,
            )
        });
    }
    while let Some((server, response)) = futures.next().await {
        match response {
            Ok(response) => match response.json::<federation::key::ClaimKeysResBody>().await {
                Ok(keys) => {
                    one_time_keys.extend(keys.one_time_keys);
                }
                Err(_e) => {
                    failures.insert(server.to_string(), json!({}));
                }
            },
            Err(_e) => {
                failures.insert(server.to_string(), json!({}));
            }
        }
    }
    Ok(ClaimKeysResBody {
        failures,
        one_time_keys,
    })
}
/// Returns the most recently stored master cross-signing key for `user_id`,
/// or `None` when the user has none (or the stored JSON fails to
/// deserialize).
///
/// Orders by `id` descending so that — consistent with the other key
/// getters in this module — a re-uploaded master key shadows older rows
/// instead of an arbitrary row being returned.
pub fn get_master_key(user_id: &UserId) -> AppResult<Option<CrossSigningKey>> {
    let key_data = e2e_cross_signing_keys::table
        .filter(e2e_cross_signing_keys::user_id.eq(user_id))
        .filter(e2e_cross_signing_keys::key_type.eq("master"))
        .order_by(e2e_cross_signing_keys::id.desc())
        .select(e2e_cross_signing_keys::key_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?;
    if let Some(key_data) = key_data {
        Ok(serde_json::from_value(key_data).ok())
    } else {
        Ok(None)
    }
}
/// Fetches the newest master cross-signing key for `user_id`, with its
/// signature map filtered via `clean_signatures` according to `sender_id`
/// and the `allowed_signatures` predicate.
///
/// Returns `None` when no key row exists or the stored JSON does not
/// deserialize into a [`CrossSigningKey`].
pub fn get_allowed_master_key(
    sender_id: Option<&UserId>,
    user_id: &UserId,
    allowed_signatures: &dyn Fn(&UserId) -> bool,
) -> AppResult<Option<CrossSigningKey>> {
    let row = e2e_cross_signing_keys::table
        .filter(e2e_cross_signing_keys::user_id.eq(user_id))
        .filter(e2e_cross_signing_keys::key_type.eq("master"))
        .order_by(e2e_cross_signing_keys::id.desc())
        .select(e2e_cross_signing_keys::key_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?;
    match row {
        None => Ok(None),
        Some(mut key_data) => {
            // Drop signatures the requester is not allowed to observe.
            clean_signatures(&mut key_data, sender_id, user_id, allowed_signatures)?;
            Ok(serde_json::from_value(key_data).ok())
        }
    }
}
/// Returns the newest self-signing cross-signing key stored for `user_id`.
///
/// Yields `None` when no key row exists or the stored JSON does not
/// deserialize into a [`CrossSigningKey`].
pub fn get_self_signing_key(user_id: &UserId) -> AppResult<Option<CrossSigningKey>> {
    let row = e2e_cross_signing_keys::table
        .filter(e2e_cross_signing_keys::user_id.eq(user_id))
        .filter(e2e_cross_signing_keys::key_type.eq("self_signing"))
        .order_by(e2e_cross_signing_keys::id.desc())
        .select(e2e_cross_signing_keys::key_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?;
    Ok(row.and_then(|data| serde_json::from_value(data).ok()))
}
/// Fetches the newest self-signing cross-signing key for `user_id`, with its
/// signature map filtered via `clean_signatures` according to `sender_id`
/// and the `allowed_signatures` predicate.
///
/// Returns `None` when no key row exists or the stored JSON does not
/// deserialize into a [`CrossSigningKey`].
pub fn get_allowed_self_signing_key(
    sender_id: Option<&UserId>,
    user_id: &UserId,
    allowed_signatures: &dyn Fn(&UserId) -> bool,
) -> AppResult<Option<CrossSigningKey>> {
    let row = e2e_cross_signing_keys::table
        .filter(e2e_cross_signing_keys::user_id.eq(user_id))
        .filter(e2e_cross_signing_keys::key_type.eq("self_signing"))
        .order_by(e2e_cross_signing_keys::id.desc())
        .select(e2e_cross_signing_keys::key_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?;
    match row {
        None => Ok(None),
        Some(mut key_data) => {
            // Drop signatures the requester is not allowed to observe.
            clean_signatures(&mut key_data, sender_id, user_id, allowed_signatures)?;
            Ok(serde_json::from_value(key_data).ok())
        }
    }
}
/// Returns the newest user-signing cross-signing key stored for `user_id`.
///
/// Yields `None` when no key row exists or the stored JSON does not
/// deserialize into a [`CrossSigningKey`].
pub fn get_user_signing_key(user_id: &UserId) -> AppResult<Option<CrossSigningKey>> {
    let row = e2e_cross_signing_keys::table
        .filter(e2e_cross_signing_keys::user_id.eq(user_id))
        .filter(e2e_cross_signing_keys::key_type.eq("user_signing"))
        .order_by(e2e_cross_signing_keys::id.desc())
        .select(e2e_cross_signing_keys::key_data)
        .first::<JsonValue>(&mut connect()?)
        .optional()?;
    Ok(row.and_then(|data| serde_json::from_value(data).ok()))
}
/// Upserts a one-time key for the given user/device.
///
/// On a conflicting (user, device, algorithm, key id) row, the stored key
/// payload is overwritten with the new one.
pub fn add_one_time_key(
    user_id: &UserId,
    device_id: &DeviceId,
    key_id: &DeviceKeyId,
    one_time_key: &OneTimeKey,
) -> AppResult<()> {
    // Serialize once and reuse for both the insert and the conflict update.
    let key_data = serde_json::to_value(one_time_key).unwrap();
    let new_row = NewDbOneTimeKey {
        user_id: user_id.to_owned(),
        device_id: device_id.to_owned(),
        algorithm: key_id.algorithm().to_string(),
        key_id: key_id.to_owned(),
        key_data: key_data.clone(),
        created_at: UnixMillis::now(),
    };
    diesel::insert_into(e2e_one_time_keys::table)
        .values(&new_row)
        .on_conflict((
            e2e_one_time_keys::user_id,
            e2e_one_time_keys::device_id,
            e2e_one_time_keys::algorithm,
            e2e_one_time_keys::key_id,
        ))
        .do_update()
        .set(e2e_one_time_keys::key_data.eq(key_data))
        .execute(&mut connect()?)?;
    Ok(())
}
/// Claims (returns and removes) one one-time key for the given user, device
/// and algorithm, taking the oldest stored row first.
///
/// Returns `None` when no key of that algorithm remains for the device.
/// NOTE(review): the select and the delete are two separate statements, not
/// a single transaction — confirm concurrent claims cannot race here.
pub fn claim_one_time_key(
    user_id: &UserId,
    device_id: &DeviceId,
    key_algorithm: &DeviceKeyAlgorithm,
) -> AppResult<Option<(OwnedDeviceKeyId, OneTimeKey)>> {
    let row = e2e_one_time_keys::table
        .filter(e2e_one_time_keys::user_id.eq(user_id))
        .filter(e2e_one_time_keys::device_id.eq(device_id))
        .filter(e2e_one_time_keys::algorithm.eq(key_algorithm.as_ref()))
        .order(e2e_one_time_keys::id.asc())
        .first::<DbOneTimeKey>(&mut connect()?)
        .optional()?;
    let Some(DbOneTimeKey {
        id,
        key_id,
        key_data,
        ..
    }) = row
    else {
        return Ok(None);
    };
    // A claimed key must never be handed out twice; remove it immediately.
    diesel::delete(e2e_one_time_keys::table.find(id)).execute(&mut connect()?)?;
    let key = serde_json::from_value::<OneTimeKey>(key_data)?;
    Ok(Some((key_id, key)))
}
/// Stores `device_keys` for a device and propagates the change.
///
/// Persists the keys, records key-change rows so syncing clients pick the
/// change up, and sends a device-list-update EDU to remote servers sharing
/// a room with the user.
pub fn add_device_keys(
    user_id: &UserId,
    device_id: &DeviceId,
    device_keys: &DeviceKeys,
) -> AppResult<()> {
    data::user::add_device_keys(user_id, device_id, device_keys)?;
    mark_device_key_update(user_id, device_id)?;
    send_device_key_update(user_id, device_id)?;
    Ok(())
}
/// Stores a user's cross-signing keys (master plus optional self-/user-
/// signing keys).
///
/// Each key is appended as a new row; the getters in this module read the
/// newest one. The self- and user-signing keys must contain exactly one
/// public key each. When `notify` is true, key-change rows are recorded so
/// clients and federation learn about the update.
///
/// TODO: Check signatures
pub fn add_cross_signing_keys(
    user_id: &UserId,
    master_key: &CrossSigningKey,
    self_signing_key: &Option<CrossSigningKey>,
    user_signing_key: &Option<CrossSigningKey>,
    notify: bool,
) -> AppResult<()> {
    diesel::insert_into(e2e_cross_signing_keys::table)
        .values(NewDbCrossSigningKey {
            user_id: user_id.to_owned(),
            key_type: "master".to_owned(),
            key_data: serde_json::to_value(master_key)?,
        })
        .execute(&mut connect()?)?;

    if let Some(self_signing_key) = self_signing_key {
        insert_single_signing_key(
            user_id,
            self_signing_key,
            "self_signing",
            "Self signing key contained no key.",
            "Self signing key contained more than one key.",
        )?;
    }

    if let Some(user_signing_key) = user_signing_key {
        insert_single_signing_key(
            user_id,
            user_signing_key,
            "user_signing",
            "User signing key contained no key.",
            "User signing key contained more than one key.",
        )?;
    }

    if notify {
        mark_signing_key_update(user_id)?;
    }
    Ok(())
}

/// Validates that `key` holds exactly one public key and appends it to the
/// cross-signing key table under `key_type`. `empty_msg` / `multiple_msg`
/// are the client-visible errors for zero or more-than-one keys.
fn insert_single_signing_key(
    user_id: &UserId,
    key: &CrossSigningKey,
    key_type: &str,
    empty_msg: &'static str,
    multiple_msg: &'static str,
) -> AppResult<()> {
    let mut key_ids = key.keys.values();
    if key_ids.next().is_none() {
        return Err(MatrixError::invalid_param(empty_msg).into());
    }
    if key_ids.next().is_some() {
        return Err(MatrixError::invalid_param(multiple_msg).into());
    }
    diesel::insert_into(e2e_cross_signing_keys::table)
        .values(NewDbCrossSigningKey {
            user_id: user_id.to_owned(),
            key_type: key_type.to_owned(),
            key_data: serde_json::to_value(key)?,
        })
        .execute(&mut connect()?)?;
    Ok(())
}
/// Records `sender_id`'s signature over one of `target_user_id`'s keys and
/// marks the target's signing keys as updated.
///
/// `signature` is a `(key id, signature)` pair; the key id is parsed and
/// validated before the row is stored.
///
/// NOTE(review): the signature is stored without being verified against the
/// signing key material — confirm verification happens upstream.
pub fn sign_key(
    target_user_id: &UserId,
    target_device_id: &str,
    signature: (String, String),
    sender_id: &UserId,
) -> AppResult<()> {
    let origin_key_id = DeviceKeyId::parse(&signature.0)?.to_owned();

    diesel::insert_into(e2e_cross_signing_sigs::table)
        .values(NewDbCrossSignature {
            origin_user_id: sender_id.to_owned(),
            origin_key_id,
            target_user_id: target_user_id.to_owned(),
            target_device_id: OwnedDeviceId::from(target_device_id),
            signature: signature.1,
        })
        .execute(&mut connect()?)?;

    // Let syncing clients and federation know the target's keys changed.
    mark_signing_key_update(target_user_id)
}
/// Records a signing-key change for `user_id` in every joined room plus one
/// global (room-less) row, and — for local users — broadcasts a
/// signing-key-update EDU to all remote servers sharing a room.
///
/// Existing change rows for the same (user, room) are replaced so only the
/// latest change per room is kept.
///
/// NOTE(review): a fresh `data::next_sn()?` is drawn per room here, whereas
/// `mark_device_key_update` reuses one sequence number for all rows —
/// confirm whether that difference is intentional.
pub fn mark_signing_key_update(user_id: &UserId) -> AppResult<()> {
    let changed_at = UnixMillis::now();
    let joined_rooms = data::user::joined_rooms(user_id)?;
    for room_id in &joined_rooms {
        // // Don't send key updates to unencrypted rooms
        // if state::get_state(&room_id, &StateEventType::RoomEncryption, "")?.is_none() {
        //     continue;
        // }
        let change = NewDbKeyChange {
            user_id: user_id.to_owned(),
            room_id: Some(room_id.to_owned()),
            changed_at,
            occur_sn: data::next_sn()?,
        };
        // Replace any previous change row for this (user, room) pair.
        diesel::delete(
            e2e_key_changes::table
                .filter(e2e_key_changes::user_id.eq(user_id))
                .filter(e2e_key_changes::room_id.eq(room_id)),
        )
        .execute(&mut connect()?)?;
        diesel::insert_into(e2e_key_changes::table)
            .values(&change)
            .execute(&mut connect()?)?;
    }
    // Global (room-less) change row for sync without a room scope.
    let change = NewDbKeyChange {
        user_id: user_id.to_owned(),
        room_id: None,
        changed_at,
        occur_sn: data::next_sn()?,
    };
    diesel::delete(
        e2e_key_changes::table
            .filter(e2e_key_changes::user_id.eq(user_id))
            .filter(e2e_key_changes::room_id.is_null()),
    )
    .execute(&mut connect()?)?;
    diesel::insert_into(e2e_key_changes::table)
        .values(&change)
        .execute(&mut connect()?)?;
    if user_id.is_local() {
        // Fan the update out to every remote server sharing a room.
        let remote_servers = room_joined_servers::table
            .filter(room_joined_servers::room_id.eq_any(joined_rooms))
            .select(room_joined_servers::server_id)
            .distinct()
            .load::<OwnedServerName>(&mut connect()?)?;
        let content = SigningKeyUpdateContent::new(user_id.to_owned());
        let edu = Edu::SigningKeyUpdate(content);
        // Best-effort: a send failure does not fail the local update.
        let _ = sending::send_edu_servers(remote_servers.into_iter(), &edu);
    }
    Ok(())
}
/// Records a device-key change for `user_id` in every joined room plus one
/// global (room-less) row, all sharing a single sequence number.
///
/// Existing change rows for the same (user, room) are replaced so only the
/// latest change per room is kept. Remote servers are NOT notified here —
/// see [`send_device_key_update`] for the federation side.
pub fn mark_device_key_update(user_id: &UserId, _device_id: &DeviceId) -> AppResult<()> {
    let changed_at = UnixMillis::now();
    let joined_rooms = data::user::joined_rooms(user_id)?;
    let occur_sn = data::next_sn()?;
    for room_id in &joined_rooms {
        // The encryption-room filter below is intentionally disabled for testing:
        // // Don't send key updates to unencrypted rooms
        // if state::get_state(&room_id, &StateEventType::RoomEncryption, "")?.is_none() {
        //     continue;
        // }
        let change = NewDbKeyChange {
            user_id: user_id.to_owned(),
            room_id: Some(room_id.to_owned()),
            changed_at,
            occur_sn,
        };
        // Replace any previous change row for this (user, room) pair.
        diesel::delete(
            e2e_key_changes::table
                .filter(e2e_key_changes::user_id.eq(user_id))
                .filter(e2e_key_changes::room_id.eq(room_id)),
        )
        .execute(&mut connect()?)?;
        diesel::insert_into(e2e_key_changes::table)
            .values(&change)
            .execute(&mut connect()?)?;
    }
    // Global (room-less) change row for sync without a room scope.
    let change = NewDbKeyChange {
        user_id: user_id.to_owned(),
        room_id: None,
        changed_at,
        occur_sn,
    };
    diesel::delete(
        e2e_key_changes::table
            .filter(e2e_key_changes::user_id.eq(user_id))
            .filter(e2e_key_changes::room_id.is_null()),
    )
    .execute(&mut connect()?)?;
    diesel::insert_into(e2e_key_changes::table)
        .values(&change)
        .execute(&mut connect()?)?;
    Ok(())
}
/// Records a device-key change for `user_id` in each of `joined_rooms`,
/// all sharing a single sequence number.
///
/// Unlike [`mark_device_key_update`], the caller supplies the room list and
/// no global (room-less) change row is written.
pub fn mark_device_key_update_with_joined_rooms(
    user_id: &UserId,
    _device_id: &DeviceId,
    joined_rooms: &[OwnedRoomId],
) -> AppResult<()> {
    let changed_at = UnixMillis::now();
    let occur_sn = data::next_sn()?;
    for room_id in joined_rooms.iter() {
        // Replace any previous change row for this (user, room) pair.
        diesel::delete(
            e2e_key_changes::table
                .filter(e2e_key_changes::user_id.eq(user_id))
                .filter(e2e_key_changes::room_id.eq(room_id)),
        )
        .execute(&mut connect()?)?;
        let key_change = NewDbKeyChange {
            user_id: user_id.to_owned(),
            room_id: Some(room_id.to_owned()),
            changed_at,
            occur_sn,
        };
        diesel::insert_into(e2e_key_changes::table)
            .values(&key_change)
            .execute(&mut connect()?)?;
    }
    Ok(())
}
/// Broadcasts a device-list-update EDU for `(user_id, device_id)` to every
/// remote server sharing a room with the user.
///
/// Convenience wrapper that looks up the user's joined rooms first.
pub fn send_device_key_update(user_id: &UserId, device_id: &DeviceId) -> AppResult<()> {
    let joined_rooms = data::user::joined_rooms(user_id)?;
    send_device_key_update_with_joined_rooms(user_id, device_id, &joined_rooms)
}
/// Sends a device-list-update EDU for a *local* user's device to every
/// distinct remote server joined to any of `joined_rooms`.
///
/// No-op for remote users, since their home server owns their device list.
fn send_device_key_update_with_joined_rooms(
    user_id: &UserId,
    device_id: &DeviceId,
    joined_rooms: &[OwnedRoomId],
) -> AppResult<()> {
    if user_id.is_remote() {
        return Ok(());
    }
    // All distinct remote servers participating in the given rooms.
    let remote_servers = room_joined_servers::table
        .filter(room_joined_servers::room_id.eq_any(joined_rooms))
        .select(room_joined_servers::server_id)
        .distinct()
        .load::<OwnedServerName>(&mut connect()?)?;
    // NOTE(review): `data::next_sn()?` is used as the update's stream id —
    // presumably a monotonically increasing counter; confirm this matches
    // the device-list stream position remote servers track.
    let content = DeviceListUpdateContent::new(
        user_id.to_owned(),
        device_id.to_owned(),
        data::next_sn()? as u64,
    );
    let edu = Edu::DeviceListUpdate(content);
    sending::send_edu_servers(remote_servers.into_iter(), &edu)
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
palpo-im/palpo | https://github.com/palpo-im/palpo/blob/066b5b15ce094d4e9f6d28484cbc9cb8bd913e67/crates/server/src/user/password.rs | crates/server/src/user/password.rs | use diesel::prelude::*;
use super::DbUser;
use crate::core::UnixMillis;
use crate::core::identifiers::*;
use crate::data::connect;
use crate::data::schema::*;
use crate::data::user::NewDbPassword;
use crate::{AppResult, MatrixError};
/// Checks `password` against the user's stored argon2 hash.
///
/// Fails with a deactivation error when the account is deactivated (or its
/// stored hash is empty), and with an unauthorized error when no hash exists
/// or the password does not match.
pub fn verify_password(user: &DbUser, password: &str) -> AppResult<()> {
    // Deactivated accounts can never authenticate.
    if user.deactivated_at.is_some() {
        return Err(MatrixError::user_deactivated("the user has been deactivated").into());
    }
    let hash = crate::user::get_password_hash(&user.id)
        .map_err(|_| MatrixError::unauthorized("wrong username or password."))?;
    // An empty hash marks a deactivated account as well.
    if hash.is_empty() {
        return Err(MatrixError::user_deactivated("the user has been deactivated").into());
    }
    // Treat verification errors the same as a mismatch.
    match argon2::verify_encoded(&hash, password.as_bytes()) {
        Ok(true) => Ok(()),
        _ => Err(MatrixError::unauthorized("wrong username or password.").into()),
    }
}
/// Returns the most recent password hash stored for `user_id`.
///
/// Errors when the user has no password row at all; an *empty* stored hash
/// is returned as-is (callers treat it as a deactivated account).
pub fn get_password_hash(user_id: &UserId) -> AppResult<String> {
    user_passwords::table
        .filter(user_passwords::user_id.eq(user_id))
        .order_by(user_passwords::id.desc())
        .select(user_passwords::hash)
        .first::<String>(&mut connect()?)
        .map_err(Into::into)
}
/// Set/update password hash for a user.
///
/// A new row is appended rather than updated in place; readers pick the
/// newest row (see `get_password_hash`).
pub fn set_password(user_id: &UserId, password: &str) -> AppResult<()> {
    let hash = crate::utils::hash_password(password)?;
    diesel::insert_into(user_passwords::table)
        .values(NewDbPassword {
            user_id: user_id.to_owned(),
            // Move the owned hash directly; the previous `.to_owned()` was a
            // redundant clone of an already-owned String.
            hash,
            created_at: UnixMillis::now(),
        })
        .execute(&mut connect()?)?;
    Ok(())
}
| rust | Apache-2.0 | 066b5b15ce094d4e9f6d28484cbc9cb8bd913e67 | 2026-01-04T20:22:21.242775Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.