repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/controller/index_controller.rs | src/controller/index_controller.rs | use super::log_request;
use super::AppState;
use actix_web::{get, web, HttpResponse, Responder};
/// Registers the index controller's routes on the service config.
pub fn init(cfg: &mut web::ServiceConfig) {
cfg.service(status);
}
#[get("/status")]
async fn status(data: web::Data<AppState<'_>>) -> impl Responder {
// Records the request against the shared connection counter (see log_request).
log_request("GET: /status", &data.connections);
// Simple liveness probe: always 200 with a fixed body.
HttpResponse::Ok().body("I am up")
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/controller/realm_controller.rs | src/controller/realm_controller.rs | use actix_web::{get, HttpResponse, Responder, web};
use crate::AppState;
/// Registers the realm controller's routes on the service config.
pub fn init(cfg: &mut web::ServiceConfig) {
cfg.service(get_realm_by_name);
}
#[get("/role/{id}")]
async fn get_realm_by_name(
name: web::Path<String>,
app_state: web::Data<AppState<'_>>,
) -> impl Responder {
let a = app_state.context.realms.add_realm("foo").await;
let x = app_state.context.realms.get_realm_by_name(name.as_str()).await;
match x {
Err(_) => HttpResponse::NotFound().finish(),
Ok(realm) => HttpResponse::Ok().json(realm),
}
} | rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/user_dao.rs | src/dao/user_dao.rs | use super::Table;
use super::User;
/// CRUD operations for the `users` table.
impl<'c> Table<'c, User> {
/// Drops the `users` table if it exists.
pub async fn drop_table(&self) -> Result<(), sqlx::Error> {
sqlx::query("DROP TABLE IF EXISTS users;")
.execute(&*self.pool)
.await
.map(|_|())
}
/// Creates the `users` table (id, name, email — all unique) if missing.
pub async fn create_table(&self) -> Result<(), sqlx::Error> {
sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS users (
id VARCHAR(48) NOT NULL UNIQUE,
name VARCHAR(64) NOT NULL UNIQUE,
email VARCHAR(256) NOT NULL UNIQUE,
PRIMARY KEY (id)
)"#,
)
.execute(&*self.pool)
.await
.map(|_|())
}
/// Fetches a single user by primary key; `Err(RowNotFound)` if absent.
pub async fn get_user_by_id(&self, user_id: &str) -> Result<User, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `name`, `email`
FROM `users`
WHERE `id` = ?"#,
)
.bind(user_id)
.fetch_one(&*self.pool)
.await
}
/// Inserts a user row; returns the number of rows affected (1 on success).
/// Note: `user.groups` is NOT persisted here — see the join-table DAO.
pub async fn add_user(&self, user: &User) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
INSERT INTO users (`id`, `name`, `email`)
VALUES(?, ?, ?)"#,
)
.bind(&user.id)
.bind(&user.name)
.bind(&user.email)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Updates name/email for the row matching `user.id`; returns rows affected
/// (0 when the user does not exist).
pub async fn update_user(&self, user: &User) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
UPDATE users
SET `name` = ?, `email` = ?
WHERE `id` = ?
"#,
)
.bind(&user.name)
.bind(&user.email)
.bind(&user.id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Deletes the user with `user_id`; returns rows affected (0 if absent).
pub async fn delete_user(&self, user_id: &str) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE FROM users
WHERE `id` = ?
"#,
)
.bind(user_id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/user_to_role_dao.rs | src/dao/user_to_role_dao.rs | use super::Role;
use super::JoinTable;
use super::User;
/// User↔Role join-table operations.
/// NOTE(review): despite living in `user_to_role_dao.rs`, every query here
/// targets `users_to_groups`/`groups` — this looks copy-pasted from the
/// user↔group DAO; confirm the intended table names (`users_to_roles`/`roles`?).
impl<'c> JoinTable<'c, User, Role> {
/// Creates the join table with FKs into `users` and `groups` if missing.
pub async fn create_table(&self) -> Result<(), sqlx::Error> {
sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS `users_to_groups`
(
`user_id` VARCHAR(48) NOT NULL,
`group_id` BIGINT UNSIGNED NOT NULL,
FOREIGN KEY (`user_id`) REFERENCES `users`(`id`),
FOREIGN KEY (`group_id`) REFERENCES `groups`(`id`)
)
"#,
)
.execute(&*self.pool)
.await
.map(|_|())
}
/// Drops the join table if it exists.
pub async fn drop_table(&self) -> Result<(), sqlx::Error> {
sqlx::query("DROP TABLE IF EXISTS users_to_groups")
.execute(&*self.pool)
.await
.map(|_|())
}
/// Inserts one join row per entry in `groups` for `user_id` using a single
/// multi-row INSERT; returns rows affected (0 for an empty slice).
pub async fn add_user_groups(
&self,
user_id: &String,
groups: &Vec<Role>,
) -> Result<u64, sqlx::Error> {
if 0 == groups.len() {
Ok(0)
} else {
// One statement with `groups.len()` (?,?) tuples; bind pairs in order.
let insert_statement = build_insert_statement(groups.len());
let mut query = sqlx::query(&insert_statement);
for group in groups {
query = query.bind(user_id).bind(group.id)
}
query.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
}
/// Fetches all roles joined to `user_id` via a subquery on the join table.
pub async fn get_groups_by_user_id(&self, user_id: &String) -> Result<Vec<Role>, sqlx::Error> {
sqlx::query_as(
r#"
select * from `groups` as `a`
where `a`.`id` in (
select `b`.`group_id` from `users_to_groups` as `b`
where `b`.`user_id` = ?
)
"#,
)
.bind(user_id)
.fetch_all(&*self.pool)
.await
}
/// Removes every join row for `user_id`; returns rows affected.
pub async fn delete_by_user_id(&self, user_id: &String) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE
FROM `users_to_groups`
WHERE `user_id` = ?
"#,
)
.bind(user_id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Removes every join row for `group_id`; returns rows affected.
/// NOTE(review): takes `i32` here but the sibling user↔group DAO takes `u64`
/// and the column is BIGINT UNSIGNED — confirm which is right.
pub async fn delete_by_group_id(&self, group_id: i32) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE
FROM `users_to_groups`
WHERE `group_id` = ?
"#,
)
.bind(group_id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Replaces the user's join rows: delete-all, then re-insert the current
/// set; returns deleted + inserted row counts (not atomic — no transaction).
pub async fn update_user_groups(&self, user: &User) -> Result<u64, sqlx::Error> {
if 0 == user.groups.len() {
self.delete_by_user_id(&user.id).await
} else {
let deleted = self.delete_by_user_id(&user.id).await?;
let added = self.add_user_groups(&user.id, &user.groups).await?;
Ok(added + deleted)
}
}
}
// Base INSERT covering a single (user_id, group_id) pair; extended below
// with one extra `, (?,?)` tuple per additional row.
static DEFAULT_INSERT: &'static str = r#"
INSERT INTO `users_to_groups` (`user_id`, `group_id`)
VALUES (?,?)
"#;
/// Builds a multi-row INSERT with one `(?,?)` value tuple per row.
/// For `rows` of 0 or 1 the base statement is returned unchanged.
fn build_insert_statement(rows: usize) -> String {
let mut statement = String::from(DEFAULT_INSERT);
if rows > 1 {
statement.push_str(&", (?,?)".repeat(rows - 1));
}
statement
}
#[cfg(test)]
mod test {
use super::{build_insert_statement, DEFAULT_INSERT};
// rows <= 1 must yield the base statement unchanged.
#[test]
fn build_insert_statement_returns_default_string_when_input_is_zero_or_one() {
let results = vec![build_insert_statement(0), build_insert_statement(1)];
assert_eq!(results[0], results[1]);
assert_eq!(results[0], DEFAULT_INSERT);
}
// n rows => base statement plus n-1 appended `, (?,?)` tuples.
#[test]
fn build_insert_statement_returns_n_parameters_when_input_is_n() {
let result = build_insert_statement(3);
assert_eq!(
format!("{0}{1}{2}", DEFAULT_INSERT, ", (?,?)", ", (?,?)"),
result
);
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/mod.rs | src/dao/mod.rs | use super::model::{Group, User};
pub mod db_context;
// DAO impl modules: each attaches methods to the generic table types below.
// NOTE(review): this dump also contains realm_dao/role_dao/user_to_role_dao
// sources that are not declared here — confirm against the full mod.rs.
mod group_dao;
mod user_dao;
mod user_to_group_dao;
// Re-export the db_context types under the `dao` namespace.
pub type Database<'c> = db_context::Database<'c>;
pub type Table<'c, T> = db_context::Table<'c, T>;
pub type JoinTable<'c, T1, T2> = db_context::JoinTable<'c, T1, T2>;
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/realm_dao.rs | src/dao/realm_dao.rs | use crate::dao::db_context::{Table};
use crate::model::{Count, Realm};
impl<'c> Table<'c, Realm> {
pub async fn add_realm(&self, name: &str) -> Result<u64, sqlx::Error> {
let realm = Realm::from(name);
sqlx::query(
r#"
INSERT INTO `roles` (`name`, `max`)
VALUES (?, ?)
"#,
)
.bind(realm.name)
.bind(realm.created_on_utc)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
pub async fn add_default_realm(&self) -> Result<Option<Realm>, sqlx::Error> {
let count: Count = sqlx::query_as("SELECT COUNT(*) FROM `realms` WHERE `id` = 0")
.fetch_one(&*self.pool)
.await?;
if count.into_inner() == 1 {
return Ok(None);
}
let realm = Realm::default();
sqlx::query(
r#"
INSERT INTO `realms` (`name`, `created_on_utc`)
VALUES (?, ?)
"#,
)
.bind(&realm.name)
.bind(&realm.created_on_utc)
.execute(&*self.pool)
.await
.map(|_|Some(realm))
}
pub async fn get_realm_by_id(&self, id: &i32) -> Result<Realm, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `name`, `created_on_utc`
FROM `realms`
WHERE `id` = ?
"#,
)
.bind(id)
.fetch_one(&*self.pool)
.await
}
pub async fn get_realm_by_name(&self, name: &str) -> Result<Realm, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `name`, `created_on_utc`
FROM `realms`
WHERE `name` = ?
"#,
)
.bind(name)
.fetch_one(&*self.pool)
.await
}
} | rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/group_dao.rs | src/dao/group_dao.rs | use super::Group;
use super::Table;
/// CRUD operations for the `groups` table.
impl<'c> Table<'c, Group> {
/// Creates the `groups` table (auto-increment id, unique name) if missing.
pub async fn create_table(&self) -> Result<(), sqlx::Error> {
sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS `groups`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`name` VARCHAR(64) NOT NULL UNIQUE,
PRIMARY KEY(id)
)
"#,
)
.execute(&*self.pool)
.await
.map(|_|())
}
/// Drops the `groups` table if it exists.
pub async fn drop_table(&self) -> Result<(), sqlx::Error> {
sqlx::query("DROP TABLE IF EXISTS `groups`")
.execute(&*self.pool)
.await
.map(|_|())
}
/// Fetches a group by primary key; `Err(RowNotFound)` if absent.
pub async fn get_group_by_id(&self, id: u64) -> Result<Group, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `name`
FROM `groups`
WHERE `id` = ?
"#,
)
.bind(id)
.fetch_one(&*self.pool)
.await
}
/// Fetches a group by its unique name; `Err(RowNotFound)` if absent.
pub async fn get_group_by_name(&self, name: &str) -> Result<Group, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `name`
FROM `groups`
WHERE `name` = ?
"#,
)
.bind(name)
.fetch_one(&*self.pool)
.await
}
/// Inserts a group (id auto-generated); returns rows affected.
pub async fn add_group(&self, name: &str) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
INSERT INTO `groups` (`name`)
VALUES (?)
"#,
)
.bind(name)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Renames the group named `current` to `update`; returns rows affected
/// (0 when no group named `current` exists).
pub async fn update_group(&self, current: &str, update: &str) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
UPDATE `groups`
SET `name` = ?
WHERE `name` = ?
"#,
)
.bind(update)
.bind(current)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Deletes the group named `name`; returns rows affected (0 if absent).
pub async fn delete_group(&self, name: &str) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE FROM `groups`
WHERE `name` = ?
"#,
)
.bind(name)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/db_context.rs | src/dao/db_context.rs | use super::{Group, User};
use sqlx::mysql::MySqlRow;
use sqlx::{FromRow, MySqlPool};
use std::marker::PhantomData;
use std::sync::Arc;
/// Generic handle to a single table whose rows map to `T`.
/// Holds a shared MySQL pool; the DAO modules attach the actual queries.
pub struct Table<'c, T>
where
T: FromRow<'c, MySqlRow>,
{
pub pool: Arc<MySqlPool>,
// Kept to tie `T: FromRow` to the struct; never called directly here.
_from_row: fn(&'c MySqlRow) -> Result<T, sqlx::Error>,
_marker: PhantomData<&'c T>,
}
impl<'c, T> Table<'c, T>
where
T: FromRow<'c, MySqlRow>,
{
/// Builds a table handle sharing `pool`. Private: only `Database::new`
/// constructs tables.
fn new(pool: Arc<MySqlPool>) -> Self {
Table {
pool,
_from_row: T::from_row,
_marker: PhantomData,
}
}
}
/// Generic handle to a join table between rows of `T1` and `T2`.
pub struct JoinTable<'c, T1, T2>
where
T1: FromRow<'c, MySqlRow>,
T2: FromRow<'c, MySqlRow>,
{
pub pool: Arc<MySqlPool>,
// As above: anchors the FromRow bounds for both sides of the join.
_from_row: (
fn(&'c MySqlRow) -> Result<T1, sqlx::Error>,
fn(&'c MySqlRow) -> Result<T2, sqlx::Error>,
),
_marker_t1: PhantomData<&'c T1>,
_marker_t2: PhantomData<&'c T2>,
}
impl<'c, T1, T2> JoinTable<'c, T1, T2>
where
T1: FromRow<'c, MySqlRow>,
T2: FromRow<'c, MySqlRow>,
{
/// Builds a join-table handle sharing `pool`. Private: see `Database::new`.
fn new(pool: Arc<MySqlPool>) -> Self {
JoinTable {
pool,
_from_row: (T1::from_row, T2::from_row),
_marker_t1: PhantomData,
_marker_t2: PhantomData,
}
}
}
/// Aggregates all table handles over one shared connection pool.
pub struct Database<'c> {
pub groups: Arc<Table<'c, Group>>,
pub users: Arc<Table<'c, User>>,
pub users_to_groups: Arc<JoinTable<'c, User, Group>>,
}
impl<'a> Database<'a> {
/// Connects to MySQL at `sql_url` and builds the table handles, all
/// sharing one pooled connection.
///
/// Generalized from `&String` to `&str` (existing `&String` call sites
/// still coerce). Panics with a clear message if the connection fails —
/// this runs once at startup, so aborting is acceptable.
pub async fn new(sql_url: &str) -> Database<'a> {
let connection = MySqlPool::connect(sql_url)
.await
.expect("failed to connect to MySQL database");
let pool = Arc::new(connection);
Database {
groups: Arc::from(Table::new(pool.clone())),
users: Arc::from(Table::new(pool.clone())),
users_to_groups: Arc::from(JoinTable::new(pool.clone())),
}
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/role_dao.rs | src/dao/role_dao.rs | use crate::model::Realm;
use super::Role;
use super::Table;
/// CRUD operations for the `roles` table.
impl<'c> Table<'c, Role> {
/// Fetches a role by primary key; `Err(RowNotFound)` if absent.
pub async fn get_role_by_id(&self, id: i32) -> Result<Role, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `max`, `name`
FROM `roles`
WHERE `id` = ?
"#,
)
.bind(id)
.fetch_one(&*self.pool)
.await
}
/// Fetches a role by its name; `Err(RowNotFound)` if absent.
pub async fn get_role_by_name(&self, name: &str) -> Result<Role, sqlx::Error> {
sqlx::query_as(
r#"
SELECT `id`, `max`, `name`
FROM `roles`
WHERE `name` = ?
"#,
)
.bind(name)
.fetch_one(&*self.pool)
.await
}
/// Inserts a role scoped to `realm`; `max` is the optional member cap
/// (NULL when `None`). Returns rows affected.
pub async fn add_role(&self, realm: &Realm, name: &str, max: &Option<i32>) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
INSERT INTO `roles` (`name`, `max`, `realm_id`)
VALUES (?, ?, ?)
"#,
)
.bind(name)
.bind(max)
.bind(realm.id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Renames the role named `current` to `update`; returns rows affected.
pub async fn update_role(&self, current: &str, update: &str) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
UPDATE `roles`
SET `name` = ?
WHERE `name` = ?
"#,
)
.bind(update)
.bind(current)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Deletes the role named `name`; returns rows affected (0 if absent).
pub async fn delete_role(&self, name: &str) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE FROM `roles`
WHERE `name` = ?
"#,
)
.bind(name)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/dao/user_to_group_dao.rs | src/dao/user_to_group_dao.rs | use super::Group;
use super::JoinTable;
use super::User;
/// User↔Group join-table operations over `users_to_groups`.
impl<'c> JoinTable<'c, User, Group> {
/// Creates the join table with FKs into `users` and `groups` if missing.
pub async fn create_table(&self) -> Result<(), sqlx::Error> {
sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS `users_to_groups`
(
`user_id` VARCHAR(48) NOT NULL,
`group_id` BIGINT UNSIGNED NOT NULL,
FOREIGN KEY (`user_id`) REFERENCES `users`(`id`),
FOREIGN KEY (`group_id`) REFERENCES `groups`(`id`)
)
"#,
)
.execute(&*self.pool)
.await
.map(|_|())
}
/// Drops the join table if it exists.
pub async fn drop_table(&self) -> Result<(), sqlx::Error> {
sqlx::query("DROP TABLE IF EXISTS users_to_groups")
.execute(&*self.pool)
.await
.map(|_|())
}
/// Inserts one join row per entry in `groups` for `user_id` using a single
/// multi-row INSERT; returns rows affected (0 for an empty slice).
pub async fn add_user_groups(
&self,
user_id: &String,
groups: &Vec<Group>,
) -> Result<u64, sqlx::Error> {
if 0 == groups.len() {
Ok(0)
} else {
// One statement with `groups.len()` (?,?) tuples; bind pairs in order.
let insert_statement = build_insert_statement(groups.len());
let mut query = sqlx::query(&insert_statement);
for group in groups {
query = query.bind(user_id).bind(group.id)
}
query.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
}
/// Fetches all groups joined to `user_id` via a subquery on the join table.
pub async fn get_groups_by_user_id(&self, user_id: &String) -> Result<Vec<Group>, sqlx::Error> {
sqlx::query_as(
r#"
select * from `groups` as `a`
where `a`.`id` in (
select `b`.`group_id` from `users_to_groups` as `b`
where `b`.`user_id` = ?
)
"#,
)
.bind(user_id)
.fetch_all(&*self.pool)
.await
}
/// Removes every join row for `user_id`; returns rows affected.
pub async fn delete_by_user_id(&self, user_id: &String) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE
FROM `users_to_groups`
WHERE `user_id` = ?
"#,
)
.bind(user_id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Removes every join row for `group_id`; returns rows affected.
pub async fn delete_by_group_id(&self, group_id: u64) -> Result<u64, sqlx::Error> {
sqlx::query(
r#"
DELETE
FROM `users_to_groups`
WHERE `group_id` = ?
"#,
)
.bind(group_id)
.execute(&*self.pool)
.await
.map(|x|x.rows_affected())
}
/// Replaces the user's join rows: delete-all, then re-insert the current
/// set; returns deleted + inserted row counts (not atomic — no transaction).
pub async fn update_user_groups(&self, user: &User) -> Result<u64, sqlx::Error> {
if 0 == user.groups.len() {
self.delete_by_user_id(&user.id).await
} else {
let deleted = self.delete_by_user_id(&user.id).await?;
let added = self.add_user_groups(&user.id, &user.groups).await?;
Ok(added + deleted)
}
}
}
// Base INSERT covering a single (user_id, group_id) pair; extended below
// with one extra `, (?,?)` tuple per additional row.
static DEFAULT_INSERT: &'static str = r#"
INSERT INTO `users_to_groups` (`user_id`, `group_id`)
VALUES (?,?)
"#;
/// Builds a multi-row INSERT with one `(?,?)` value tuple per row.
/// For `rows` of 0 or 1 the base statement is returned unchanged.
fn build_insert_statement(rows: usize) -> String {
let mut statement = String::from(DEFAULT_INSERT);
if rows > 1 {
statement.push_str(&", (?,?)".repeat(rows - 1));
}
statement
}
#[cfg(test)]
mod test {
use super::{build_insert_statement, DEFAULT_INSERT};
// rows <= 1 must yield the base statement unchanged.
#[test]
fn build_insert_statement_returns_default_string_when_input_is_zero_or_one() {
let results = vec![build_insert_statement(0), build_insert_statement(1)];
assert_eq!(results[0], results[1]);
assert_eq!(results[0], DEFAULT_INSERT);
}
// n rows => base statement plus n-1 appended `, (?,?)` tuples.
#[test]
fn build_insert_statement_returns_n_parameters_when_input_is_n() {
let result = build_insert_statement(3);
assert_eq!(
format!("{0}{1}{2}", DEFAULT_INSERT, ", (?,?)", ", (?,?)"),
result
);
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/user.rs | src/model/user.rs | use super::Group;
use serde::{Deserialize, Serialize};
use sqlx::mysql::MySqlRow;
use sqlx::{FromRow, Row};
/// Application user; `groups` is populated separately via the join-table DAO.
#[derive(Serialize, Deserialize, Clone)]
pub struct User {
pub id: String,
pub name: String,
pub email: String,
pub groups: Vec<Group>,
}
impl<'c> FromRow<'c, MySqlRow> for User {
/// Maps a row positionally (0=id, 1=name, 2=email); `groups` is left
/// empty because the join table is queried separately.
fn from_row(row: &MySqlRow) -> Result<Self, sqlx::Error> {
Ok(User {
id: row.get(0),
name: row.get(1),
email: row.get(2),
groups: Vec::with_capacity(0),
})
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/count.rs | src/model/count.rs | use sqlx::{Error, FromRow, Row};
use sqlx::sqlite::SqliteRow;
/// Newtype wrapper for a scalar `COUNT(*)` query result.
pub struct Count(i32);
impl Count {
/// Unwraps the counted value.
pub fn into_inner(self) -> i32 {
self.0
}
}
// NOTE(review): implemented for SqliteRow while the DAO layer here runs
// against MySQL pools — confirm which backend this is meant for.
impl<'r> FromRow<'r, SqliteRow> for Count {
fn from_row(row: &'r SqliteRow) -> Result<Self, Error> {
row.try_get(0).map(|x|Count(x))
}
}
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/password.rs | src/model/password.rs | use std::convert::TryFrom;
use argon2::{
password_hash::{
rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Argon2
};
use argon2::password_hash::rand_core::RngCore;
/// A 32-byte Argon2 hash of a user-supplied password.
pub(crate) struct Password([u8;32]);
impl TryFrom<&str> for Password {
type Error = argon2::Error;
/// Hashes `value` with Argon2 using a fresh random 16-byte salt.
/// Because the salt is random, hashing the same input twice yields
/// different `Password` values.
fn try_from(value: &str) -> Result<Self,argon2::Error> {
let argon2 = Argon2::default();
let mut salt = [0u8; 16];
OsRng.fill_bytes(&mut salt);
let mut bytes = [0u8; 32];
argon2.hash_password_into(value.as_bytes(), &salt, &mut bytes)?;
Ok(Password(bytes))
}
}
impl PartialEq for Password {
// Fixed: removed the hand-written `ne` — `PartialEq` already provides
// `ne` as `!eq` by default, so the manual impl was pure duplication.
// NOTE(review): this is a short-circuiting (non-constant-time) compare
// of password hashes; consider a constant-time compare if timing side
// channels matter here.
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
#[cfg(test)]
mod password_test {
use std::convert::TryFrom;
use super::Password;
// Hashing any input should succeed.
#[test]
fn try_from_test() {
let password = Password::try_from("password123");
assert!(password.is_ok());
// Disabled: equality of two hashes of the same input cannot hold
// because TryFrom salts each hash with fresh random bytes.
/*let other = Password::try_from("password123");
assert!(password.is_ok());
let password = password.unwrap();
let other = other.unwrap();
assert_eq!(password, other);*/
}
}
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/group.rs | src/model/group.rs | use serde::{Deserialize, Serialize};
use sqlx::mysql::MySqlRow;
use sqlx::{FromRow, Row};
/// A named group; `id` is the auto-increment primary key in `groups`.
#[derive(Serialize, Deserialize, PartialEq, Clone)]
pub struct Group {
pub id: u64,
pub name: String,
}
impl<'c> FromRow<'c, MySqlRow> for Group {
/// Maps a row positionally (0=id, 1=name).
fn from_row(row: &MySqlRow) -> Result<Self, sqlx::Error> {
Ok(Group {
id: row.get(0),
name: row.get(1),
})
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/mod.rs | src/model/mod.rs | mod group;
mod user;
// Re-export the model types at the `model` namespace root.
pub type User = user::User;
pub type Group = group::Group;
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/realm.rs | src/model/realm.rs | use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use sqlx::{Error, FromRow, Row};
use sqlx::sqlite::SqliteRow;
use crate::model::{Role, User};
/// A realm (tenant-like scope) owning roles and users.
#[derive(Serialize, Deserialize, PartialEq, Clone)]
pub struct Realm {
pub id: i32,
pub name: String,
pub created_on_utc: DateTime<Utc>,
pub roles: Vec<Role>,
pub users: Vec<User>,
}
impl Default for Realm {
/// The built-in "master" realm (id 0) with default role and user.
fn default() -> Self {
Realm {
id: 0,
name: String::from("master"),
created_on_utc: Utc::now(),
roles: vec![Role::default()],
users: vec![User::default()]
}
}
}
impl From<&str> for Realm {
/// Builds a not-yet-persisted realm: id 0, timestamp now, no members.
fn from(name: &str) -> Self {
Realm {
id: 0,
name: name.to_string(),
created_on_utc: Utc::now(),
roles: Vec::with_capacity(0),
users: Vec::with_capacity(0)
}
}
}
// NOTE(review): FromRow is implemented for SqliteRow while the DAOs here use
// MySQL pools — confirm the intended database backend.
impl<'c> FromRow<'c, SqliteRow> for Realm {
/// Maps a row positionally (0=id, 1=name, 2=created_on_utc); member
/// collections are left empty and loaded separately.
fn from_row(row: &'c SqliteRow) -> Result<Self, Error> {
Ok(Realm {
id: row.get(0),
name: row.get(1),
created_on_utc: row.get(2),
roles: Vec::with_capacity(0),
users: Vec::with_capacity(0)
})
}
}
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/model/role.rs | src/model/role.rs | use serde::{Deserialize, Serialize};
use sqlx::{FromRow, Row};
use sqlx::sqlite::SqliteRow;
/// A role; `max` is an optional cap (semantics defined by the DAO layer).
#[derive(Serialize, Deserialize, PartialEq, Clone)]
pub struct Role {
pub id: i32,
pub max: Option<i32>,
pub name: String,
}
impl Default for Role {
/// The built-in "root" role (id 0, max 1).
fn default() -> Self {
Role {
id: 0,
max: Some(1),
name: String::from("root"),
}
}
}
// NOTE(review): FromRow is implemented for SqliteRow while the DAOs here use
// MySQL pools — confirm the intended database backend.
impl<'c> FromRow<'c, SqliteRow> for Role {
/// Maps a row positionally (0=id, 1=max, 2=name).
fn from_row(row: &SqliteRow) -> Result<Self, sqlx::Error> {
Ok(Role {
id: row.get(0),
max: row.get(1),
name: row.get(2),
})
}
}
impl From<&str> for Role {
/// Builds a not-yet-persisted role: id 0, no cap.
/// Generalized from `From<&'static str>`: the name is copied into an
/// owned String anyway, so any string slice works; existing `'static`
/// callers remain valid.
fn from(role: &str) -> Self {
Role {
id: 0,
max: None,
name: role.to_string()
}
}
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/test.rs | tests/test.rs | use sqlx_user_crud::config::Config;
use sqlx_user_crud::dao::Database;
use uuid::Uuid;
/// Appends a random UUID to `input` so concurrently-run tests never collide
/// on unique columns (name/email/group name).
fn randomize_string(input: &'static str) -> String {
format!("{0}{1}", input, Uuid::new_v4().to_string())
}
/// Builds a Database from the test config file; requires a reachable MySQL
/// instance as configured in test_resource/config.test.json.
async fn init_db_context() -> Database<'static> {
let config = Config::from_file("test_resource/config.test.json");
Database::new(&config.get_database_url()).await
}
#[cfg(test)]
mod controller_test;
#[cfg(test)]
mod dao_test;
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/controller_test/index_controller_test.rs | tests/controller_test/index_controller_test.rs | use super::init_db_context;
use actix_web::{test, web, App};
use sqlx_user_crud::{controller, AppState};
use std::sync::{Arc, Mutex};
// GET /status on the index controller should always succeed (liveness probe).
#[actix_rt::test]
async fn status_returns_ok_and_message() -> () {
let db_context = init_db_context().await;
let app_state = web::Data::new(AppState {
connections: Mutex::new(0),
context: Arc::new(db_context),
});
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_index_controller),
)
.await;
let req = test::TestRequest::get().uri("/status").to_request();
let resp = test::call_service(&mut app, req).await;
assert!(resp.status().is_success());
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/controller_test/group_controller_test.rs | tests/controller_test/group_controller_test.rs | use super::{init_app_state, randomize_string};
use actix_web::{http, test, App};
use sqlx;
use sqlx_user_crud::controller;
use sqlx_user_crud::controller::group_controller::GroupUpdate;
// Looking up a group id that cannot exist should 404.
#[actix_rt::test]
async fn get_group_returns_404_when_not_found() -> () {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_group_controller),
)
.await;
let req = test::TestRequest::get().uri("/group/0").to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::NOT_FOUND);
}
// NOTE(review): name says 204 but the assertion is ACCEPTED (202) — rename?
#[actix_rt::test]
async fn post_group_returns_204_when_valid_group_is_added() -> () {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_group_controller),
)
.await;
let group_name = randomize_string("user");
let req = test::TestRequest::post()
.uri("/group")
.set_json(&group_name)
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::ACCEPTED);
}
// NOTE(review): name says 204 but the assertion is ACCEPTED (202) — rename?
#[actix_rt::test]
async fn patch_group_returns_204_when_group_is_patched() -> Result<(), sqlx::Error> {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_group_controller),
)
.await;
// Seed a group directly through the DAO, then rename it over HTTP.
let group_name = randomize_string("administrator");
let _ = app_state.context.groups.add_group(&group_name).await?;
let update = GroupUpdate {
old: group_name,
new: randomize_string("Administrator"),
};
let req = test::TestRequest::patch()
.uri("/group")
.set_json(&update)
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::ACCEPTED);
Ok(())
}
// Deleting an existing group should 200.
#[actix_rt::test]
async fn delete_group_returns_200_when_group_is_deleted() -> Result<(), sqlx::Error> {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_group_controller),
)
.await;
let group_name = randomize_string("developers");
let _ = app_state.context.groups.add_group(&group_name).await?;
let req = test::TestRequest::delete()
.uri(&format!("/group/{0}", group_name))
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::OK);
Ok(())
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/controller_test/mod.rs | tests/controller_test/mod.rs | use super::{init_db_context, randomize_string};
use actix_web::web;
use actix_web::web::Data;
use sqlx_user_crud::AppState;
use std::sync::{Arc, Mutex};
/// Wraps a fresh Database in the shared actix AppState used by every
/// controller test (connection counter starts at 0).
async fn init_app_state() -> Data<AppState<'static>> {
let db_context = init_db_context().await;
web::Data::new(AppState {
connections: Mutex::new(0),
context: Arc::new(db_context),
})
}
mod group_controller_test;
#[cfg(test)]
mod index_controller_test;
mod user_controller_test;
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/controller_test/user_controller_test.rs | tests/controller_test/user_controller_test.rs | use super::init_app_state;
use crate::randomize_string;
use actix_web::{http, test, App};
use sqlx_user_crud::controller;
use sqlx_user_crud::model::User;
use uuid::Uuid;
#[actix_rt::test]
async fn get_user_returns_err_when_not_found() -> () {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let req = test::TestRequest::get().uri("/user/n0t-f0un5").to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::NOT_FOUND);
}
#[actix_rt::test]
async fn get_user_returns_200_when_user_exists() -> Result<(), sqlx::Error> {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("alice"),
email: randomize_string("alice@email.com"),
groups: Vec::new(),
};
let _ = app_state.context.users.add_user(&user).await?;
let req = test::TestRequest::get()
.uri(&format!("/user/{0}", user.id))
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::OK);
Ok(())
}
#[actix_rt::test]
async fn post_user_returns_202_when_user_is_valid() -> () {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("bob"),
email: randomize_string("bob@email.com"),
groups: Vec::new(),
};
let req = test::TestRequest::post()
.uri("/user")
.set_json(&user)
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::ACCEPTED)
}
#[actix_rt::test]
async fn post_user_returns_202_when_user_and_groups_are_valid() -> Result<(), sqlx::Error>
{
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let group = randomize_string("custodians");
let _ = app_state.context.groups.add_group(&group).await?;
let group = app_state.context.groups.get_group_by_name(&group).await?;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("bob"),
email: randomize_string("bob@email.com"),
groups: vec![group],
};
let req = test::TestRequest::post()
.uri("/user")
.set_json(&user)
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::ACCEPTED);
Ok(())
}
#[actix_rt::test]
async fn post_user_returns_500_when_user_already_exists() -> Result<(), sqlx::Error> {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("charlie"),
email: randomize_string("charlie@email.com"),
groups: vec![],
};
let _ = app_state.context.users.add_user(&user).await?;
let req = test::TestRequest::post()
.uri("/user")
.set_json(&user)
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::INTERNAL_SERVER_ERROR);
Ok(())
}
/// PATCH /user must reply 404 Not Found when the target user is unknown.
#[actix_rt::test]
async fn patch_user_returns_404_when_user_does_not_exist() -> () {
    let app_state = init_app_state().await;
    let mut app = test::init_service(
        App::new()
            .app_data(app_state.clone())
            .configure(controller::init_user_controller),
    )
    .await;

    // A freshly generated UUID cannot match any persisted user.
    let unknown_user = User {
        id: Uuid::new_v4().to_string(),
        name: randomize_string("edison"),
        email: randomize_string("edison@email.com"),
        groups: vec![],
    };

    let request = test::TestRequest::patch()
        .uri("/user")
        .set_json(&unknown_user)
        .to_request();
    let response = test::call_service(&mut app, request).await;
    assert_eq!(http::StatusCode::NOT_FOUND, response.status())
}
// PATCH /user succeeds with 202 Accepted when the target user exists.
#[actix_rt::test]
async fn patch_user_returns_202_when_user_exists() -> Result<(), sqlx::Error> {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let mut user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("fred"),
email: randomize_string("fred@email.com"),
groups: vec![],
};
// Seed the user, then PATCH an updated name under the same id.
let _ = app_state.context.users.add_user(&user).await?;
user.name = randomize_string("fredrick");
let req = test::TestRequest::patch()
.uri("/user")
.set_json(&user)
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::ACCEPTED);
Ok(())
}
/// DELETE /user/{id} must reply 404 Not Found for an id that was never stored.
#[actix_rt::test]
async fn delete_user_returns_404_when_user_does_not_exist() -> () {
    let app_state = init_app_state().await;
    let mut app = test::init_service(
        App::new()
            .app_data(app_state.clone())
            .configure(controller::init_user_controller),
    )
    .await;

    let missing_id = Uuid::new_v4().to_string();
    let request = test::TestRequest::delete()
        .uri(&format!("/user/{}", missing_id))
        .to_request();

    let response = test::call_service(&mut app, request).await;
    assert_eq!(http::StatusCode::NOT_FOUND, response.status())
}
// DELETE /user/{id} answers 200 OK once the seeded user has been removed.
#[actix_rt::test]
async fn delete_user_returns_200_when_user_exists() -> Result<(), sqlx::Error> {
let app_state = init_app_state().await;
let mut app = test::init_service(
App::new()
.app_data(app_state.clone())
.configure(controller::init_user_controller),
)
.await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("gina"),
email: randomize_string("gina@email.com"),
groups: vec![],
};
// Seed through the DAO so the controller has something to delete.
let _ = app_state.context.users.add_user(&user).await?;
let req = test::TestRequest::delete()
.uri(&format!("/user/{0}", &user.id))
.to_request();
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), http::StatusCode::OK);
Ok(())
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/user_to_group_dao_test.rs | tests/dao_test/user_to_group_dao_test.rs | use super::{init_db_context, randomize_string};
use sqlx;
use sqlx_user_crud::model::{Group, User};
use uuid::Uuid;
// Linking one existing user to one existing group affects exactly one row.
#[actix_rt::test]
async fn add_user_groups_returns_1_when_user_is_associated_with_group(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("alice"),
email: randomize_string("alice@email.com"),
groups: Vec::with_capacity(0),
};
let group = randomize_string("user");
let _ = db.users.add_user(&user).await?;
let _ = db.groups.add_group(&group).await?;
// Shadowing: `group` switches from the String name to the fetched Group row.
let group = db.groups.get_group_by_name(&group).await?;
let groups = vec![group];
let result = db.users_to_groups.add_user_groups(&user.id, &groups).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Linking one user to three groups must report three affected rows.
#[actix_rt::test]
async fn add_user_groups_returns_3_when_user_is_associated_with_3_groups(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("bob"),
email: randomize_string("bob@email.com"),
groups: Vec::with_capacity(0),
};
let group_names = vec![
randomize_string("engineer"),
randomize_string("architect"),
randomize_string("tester"),
];
let _ = db.users.add_user(&user).await?;
for group_name in group_names.iter() {
let _ = db.groups.add_group(group_name).await?;
}
// Re-read each group so the vec carries the DB-assigned ids.
let mut groups = Vec::with_capacity(3);
for group_name in group_names.iter() {
let group = db.groups.get_group_by_name(group_name).await?;
groups.push(group);
}
let result = db.users_to_groups.add_user_groups(&user.id, &groups).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(3, result);
Ok(())
}
// Linking to a Group with id 0 that was never inserted must fail
// (presumably a foreign-key violation — the test only requires Err).
#[actix_rt::test]
async fn add_user_groups_returns_err_when_group_does_not_exist(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("charlie"),
email: randomize_string("charlie@email.com"),
groups: Vec::with_capacity(0),
};
let groups = vec![Group {
id: 0,
name: String::from("non-existent"),
}];
let _ = db.users.add_user(&user).await?;
let result = db.users_to_groups.add_user_groups(&user.id, &groups).await;
assert!(result.is_err());
Ok(())
}
// Mirror case: a real group, but a user id that was never inserted.
#[actix_rt::test]
async fn add_user_groups_returns_err_when_user_does_not_exist() -> Result<(), sqlx::Error>
{
let db = init_db_context().await;
let group_name = randomize_string("hackers");
let _ = db.groups.add_group(&group_name).await?;
let group = db.groups.get_group_by_name(&group_name).await?;
let groups = vec![group];
let result = db
.users_to_groups
.add_user_groups(&Uuid::new_v4().to_string(), &groups)
.await;
assert!(result.is_err());
Ok(())
}
// After linking a user to one group, the lookup returns exactly that group.
#[actix_rt::test]
async fn get_groups_by_user_id_returns_users_groups() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("candice"),
email: randomize_string("candice@email.com"),
groups: Vec::with_capacity(0),
};
let group = randomize_string("vendor");
// Inner block: the re-bound `group` row stays local; the outer String name
// survives for the assertion below.
{
let _ = db.users.add_user(&user).await?;
let _ = db.groups.add_group(&group).await?;
let group = db.groups.get_group_by_name(&group).await?;
let groups = vec![group];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
let result = db.users_to_groups.get_groups_by_user_id(&user.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result.len());
assert_eq!(group, result[0].name);
Ok(())
}
// Unknown user: the lookup is Ok with an empty vec, not an error.
#[actix_rt::test]
async fn get_groups_by_user_id_returns_empty_vec_when_user_does_not_exist() -> () {
let db = init_db_context().await;
let user_id = Uuid::new_v4().to_string();
let result = db.users_to_groups.get_groups_by_user_id(&user_id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result.len());
}
// Deleting links for an unknown user is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn delete_by_user_id_returns_0_when_user_id_does_not_exist() -> () {
let db = init_db_context().await;
let user_id = Uuid::new_v4().to_string();
let result = db.users_to_groups.delete_by_user_id(&user_id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// One existing user/group link deleted by user id => exactly 1 row reported.
#[actix_rt::test]
async fn delete_by_user_id_returns_number_of_rows_deleted() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("donald"),
email: randomize_string("donald@email.com"),
groups: Vec::with_capacity(0),
};
let group = randomize_string("customer");
// Setup block: seed user, group, and one link row.
{
let _ = db.users.add_user(&user).await?;
let _ = db.groups.add_group(&group).await?;
let group = db.groups.get_group_by_name(&group).await?;
let groups = vec![group];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
let result = db.users_to_groups.delete_by_user_id(&user.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Deleting links for an unknown group id is a no-op: Ok(0).
#[actix_rt::test]
async fn delete_by_group_id_returns_0_when_group_id_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.users_to_groups.delete_by_group_id(0).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// One existing link deleted by group id => exactly 1 row reported.
#[actix_rt::test]
async fn delete_by_group_id_returns_number_of_rows_deleted() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("emily"),
email: randomize_string("emily@email.com"),
groups: Vec::with_capacity(0),
};
let group = randomize_string("executive");
{
let _ = db.users.add_user(&user).await?;
let _ = db.groups.add_group(&group).await?;
}
// The fetched row is cloned into the link call so `group.id` stays usable
// for the delete below.
let group = db.groups.get_group_by_name(&group).await?;
{
let groups = vec![group.clone()];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
let result = db.users_to_groups.delete_by_group_id(group.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Updating with an empty `groups` vec removes the existing link (1 row).
#[actix_rt::test]
async fn update_user_groups_deletes_rows_when_users_group_vec_is_empty(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("frank"),
email: randomize_string("frank@email.com"),
groups: vec![],
};
let group_name = randomize_string("faculty");
{
let _ = db.users.add_user(&user).await?;
let _ = db.groups.add_group(&group_name).await?;
}
let group = db.groups.get_group_by_name(&group_name).await?;
{
let groups = vec![group.clone()];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
// Assert the function returns 1 modification
let result = db.users_to_groups.update_user_groups(&user).await;
assert!(result.is_ok());
assert_eq!(1, result.unwrap());
// Verify the function has altered the table
let result = db.users_to_groups.get_groups_by_user_id(&user.id).await?;
assert_eq!(0, result.len());
Ok(())
}
// Updating to a two-group vec reports deleted + added rows.
#[actix_rt::test]
async fn update_user_groups_returns_deleted_plus_added_rows_when_groups_is_not_empty(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let mut user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("george"),
email: randomize_string("george@email.com"),
groups: vec![],
};
let group_names = vec![randomize_string("general"), randomize_string("gossiper")];
{
let _ = db.users.add_user(&user).await?;
let _ = db.groups.add_group(&group_names[0]).await?;
let _ = db.groups.add_group(&group_names[1]).await?;
}
let groups = vec![
db.groups.get_group_by_name(&group_names[0]).await?,
db.groups.get_group_by_name(&group_names[1]).await?,
];
// Pre-link only the first group so the update has something to delete.
{
let groups = vec![groups[0].clone()];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
user.groups = groups;
// Assert 3 modifications: 1 pre-existing link deleted + 2 links added,
// per the test name. (Previous comment said "1 modification" — stale.)
let result = db.users_to_groups.update_user_groups(&user).await;
assert!(result.is_ok());
assert_eq!(3, result.unwrap());
Ok(())
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/db_context_test.rs | tests/dao_test/db_context_test.rs | use sqlx_user_crud::config::Config;
use sqlx_user_crud::dao::Database;
// Smoke test: constructing a Database from the configured URL must complete
// without panicking. No assertions — success is simply finishing setup.
#[actix_rt::test]
async fn new_returns_db_context_when_url_is_valid() {
let config = Config::from_file("test_resource/config.test.json");
let _db_context = Database::new(&config.get_database_url()).await;
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/group_dao_test.rs | tests/dao_test/group_dao_test.rs | use super::{init_db_context, randomize_string};
use sqlx;
// Inserting a brand-new group reports exactly one affected row.
#[actix_rt::test]
async fn add_group_returns_1_when_group_is_valid() -> () {
let db = init_db_context().await;
let group_name = randomize_string("users");
let result = db.groups.add_group(&group_name).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
}
// Inserting the same name twice must error on the second attempt.
#[actix_rt::test]
async fn add_group_returns_err_when_group_already_exists() -> () {
let db = init_db_context().await;
let group_name = randomize_string("administrators");
let _ = db.groups.add_group(&group_name).await;
let result = db.groups.add_group(&group_name).await;
assert!(result.is_err());
}
// Lookup by name returns the stored row with a positive, DB-assigned id.
#[actix_rt::test]
async fn get_group_by_name_returns_group_when_name_exists() -> () {
let db = init_db_context().await;
let group_name = randomize_string("accountants");
let _ = db.groups.add_group(&group_name).await;
let result = db.groups.get_group_by_name(&group_name).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(group_name, result.name);
// Auto-increment ids start at 1, so any stored row satisfies this.
assert!(1 <= result.id);
}
// Lookup of a name that was never stored is an Err, not an empty value.
#[actix_rt::test]
async fn get_group_by_name_returns_err_when_group_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.groups.get_group_by_name("not found").await;
assert!(result.is_err());
}
// Lookup by id round-trips the row fetched earlier by name.
#[actix_rt::test]
async fn get_group_by_id_returns_group_when_id_is_valid() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let group_name = randomize_string("engineers")
;
let _ = db.groups.add_group(&group_name).await?;
let group = db.groups.get_group_by_name(&group_name).await?;
let result = db.groups.get_group_by_id(group.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(group.id, result.id);
assert_eq!(group.name, result.name);
Ok(())
}
// Renaming an existing group touches exactly one row.
#[actix_rt::test]
async fn update_group_returns_1_when_group_has_been_updated() -> Result<(), sqlx::Error>
{
let db = init_db_context().await;
let group_name = randomize_string("testers");
let _ = db.groups.add_group(&group_name).await?;
let result = db
.groups
.update_group(&group_name, &randomize_string("qa testers"))
.await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Updating a missing group is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn update_group_returns_0_when_group_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.groups.update_group("not found", "still not found").await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// Deleting an existing group reports exactly one affected row.
#[actix_rt::test]
async fn delete_group_returns_1_when_group_can_be_deleted() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let group_name = randomize_string("executives");
let _ = db.groups.add_group(&group_name).await?;
let result = db.groups.delete_group(&group_name).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Deleting a missing group is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn delete_group_returns_0_when_group_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.groups.delete_group("not found").await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/role_dao_test.rs | tests/dao_test/role_dao_test.rs | use super::{init_db_context, randomize_string};
use sqlx;
use sqlx_user_crud::model::Realm;
/// Inserting a brand-new role into the default realm must affect exactly
/// one row.
///
/// Fix: the assertions were commented out, so this test could never fail —
/// it passed even when `add_role` returned Err. The assertions are restored;
/// the error is still printed first so CI logs show the root cause.
#[actix_rt::test]
async fn add_group_returns_1_when_group_is_valid() -> () {
    let db = init_db_context().await;
    let group_name = randomize_string("users");
    let result = db.roles.add_role(&Realm::default(), &group_name, &None).await;
    if let Err(e) = &result {
        eprintln!("{}", e);
    }
    assert!(result.is_ok());
    assert_eq!(1, result.unwrap());
}
// NOTE(review): these tests were copied from group_dao_test — the `*_group_*`
// names now exercise `db.roles`; consider renaming to `*_role_*` for clarity.
// Adding the same role name twice must error on the second attempt.
#[actix_rt::test]
async fn add_group_returns_err_when_group_already_exists() -> () {
let db = init_db_context().await;
let group_name = randomize_string("administrators");
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await;
let result = db.roles.add_role(&Realm::default(), &group_name, &None).await;
assert!(result.is_err());
}
// Lookup by name returns the stored role with a positive, DB-assigned id.
#[actix_rt::test]
async fn get_group_by_name_returns_group_when_name_exists() -> () {
let db = init_db_context().await;
let group_name = randomize_string("accountants");
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await;
let result = db.roles.get_role_by_name(&group_name).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(group_name, result.name);
assert!(1 <= result.id);
}
// Lookup of a never-stored name is an Err, not an empty value.
#[actix_rt::test]
async fn get_group_by_name_returns_err_when_group_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.roles.get_role_by_name("not found").await;
assert!(result.is_err());
}
// Lookup by id round-trips the row fetched earlier by name.
#[actix_rt::test]
async fn get_group_by_id_returns_group_when_id_is_valid() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let group_name = randomize_string("engineers");
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await?;
let group = db.roles.get_role_by_name(&group_name).await?;
let result = db.roles.get_role_by_id(group.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(group.id, result.id);
assert_eq!(group.name, result.name);
Ok(())
}
// Renaming an existing role touches exactly one row.
#[actix_rt::test]
async fn update_group_returns_1_when_group_has_been_updated() -> Result<(), sqlx::Error>
{
let db = init_db_context().await;
let group_name = randomize_string("testers");
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await?;
let result = db
.roles
.update_role(&group_name, &randomize_string("qa testers"))
.await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Updating a missing role is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn update_group_returns_0_when_group_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.roles.update_role("not found", "still not found").await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// Deleting an existing role reports exactly one affected row.
#[actix_rt::test]
async fn delete_group_returns_1_when_group_can_be_deleted() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let group_name = randomize_string("executives");
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await?;
let result = db.roles.delete_role(&group_name).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Deleting a missing role is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn delete_group_returns_0_when_group_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.roles.delete_role("not found").await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/user_dao_test.rs | tests/dao_test/user_dao_test.rs | use super::{init_db_context, randomize_string};
use sqlx;
use sqlx_user_crud::model::User;
use uuid::Uuid;
/// Inserting a brand-new user must report exactly one affected row.
#[actix_rt::test]
async fn add_user_returns_1() -> Result<(), sqlx::Error> {
    let db = init_db_context().await;
    let new_user = User {
        id: Uuid::new_v4().to_string(),
        name: randomize_string("alice"),
        email: randomize_string("alice@email.com"),
        groups: Vec::with_capacity(0),
    };

    let inserted = db.users.add_user(&new_user).await;
    assert!(inserted.is_ok());
    assert_eq!(1, inserted.unwrap());
    Ok(())
}
/// A second user with the same name and email must be rejected (the first
/// insert succeeds with one affected row; the duplicate returns Err).
///
/// Fix: fixture typo "bob@emai.com" -> "bob@email.com" for consistency with
/// every other fixture in this file (the value is randomized either way).
#[actix_rt::test]
async fn add_user_returns_err_when_duplicate_username_is_added(
) -> Result<(), sqlx::Error> {
    let db = init_db_context().await;
    let name = randomize_string("bob");
    let email = randomize_string("bob@email.com");
    let original = User {
        id: Uuid::new_v4().to_string(),
        name: name.clone(),
        email: email.clone(),
        groups: Vec::with_capacity(0),
    };
    // Same name/email, different id — only the name/email collide.
    let duplicate = User {
        id: Uuid::new_v4().to_string(),
        name: name.clone(),
        email: email.clone(),
        groups: Vec::with_capacity(0),
    };
    let result = db.users.add_user(&original).await?;
    assert_eq!(1, result);
    let result = db.users.add_user(&duplicate).await;
    assert!(result.is_err());
    Ok(())
}
// Fetching a never-stored id is an Err, not an empty value.
#[actix_rt::test]
async fn get_user_by_id_returns_error_when_user_does_not_exist() -> () {
let db = init_db_context().await;
let id = Uuid::new_v4().to_string();
let result = db.users.get_user_by_id(&id).await;
assert!(result.is_err());
}
// Fetching a seeded id round-trips the stored name and email.
#[actix_rt::test]
async fn get_user_by_id_returns_user_when_user_exists() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("charlie"),
email: randomize_string("charlie@email.com"),
groups: Vec::with_capacity(0),
};
let _ = db.users.add_user(&user).await?;
let result = db.users.get_user_by_id(&user.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(user.name, result.name);
assert_eq!(user.email, result.email);
Ok(())
}
// Updating a user that was never stored is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn update_user_returns_zero_when_user_does_not_exist() -> () {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("david"),
email: randomize_string("david@email.com"),
groups: Vec::with_capacity(0),
};
let result = db.users.update_user(&user).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// Updating an existing user's email touches exactly one row.
#[actix_rt::test]
async fn update_user_returns_1_when_user_exists() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("emily"),
email: randomize_string("emily@email.com"),
groups: Vec::with_capacity(0),
};
let _ = db.users.add_user(&user).await?;
// Same id, changed email — the clone keeps the original for comparison.
let mut updated_user = user.clone();
updated_user.email = randomize_string("emily_edison");
let result = db.users.update_user(&updated_user).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Deleting a user that was never stored is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn delete_user_returns_0_when_user_does_not_exist() -> () {
let db = init_db_context().await;
let id = Uuid::new_v4().to_string();
let result = db.users.delete_user(&id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// Deleting a seeded user reports exactly one affected row.
#[actix_rt::test]
async fn delete_user_returns_1_when_user_exists() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User {
id: Uuid::new_v4().to_string(),
name: randomize_string("gary"),
email: randomize_string("gary@email.com"),
groups: Vec::with_capacity(0),
};
let _ = db.users.add_user(&user).await?;
let result = db.users.delete_user(&user.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/user_to_role_dao_test.rs | tests/dao_test/user_to_role_dao_test.rs | use super::{init_db_context, randomize_string};
use sqlx;
use sqlx_user_crud::model::{Realm, Role, User};
use uuid::Uuid;
// Role-backed variant of the user_to_group tests: users are linked to rows
// from `db.roles`. Linking one user to one role affects exactly one row.
#[actix_rt::test]
async fn add_user_groups_returns_1_when_user_is_associated_with_group(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("alice", "alice@email.com", "111-222-3344");
let group = randomize_string("user");
let _ = db.users.add_user(&user).await?;
let _ = db.roles.add_role(&Realm::default(), &group, &None).await?;
// Shadowing: `group` switches from the String name to the fetched Role row.
let group = db.roles.get_role_by_name(&group).await?;
let groups = vec![group];
let result = db.users_to_groups.add_user_groups(&user.id, &groups).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Linking one user to three roles must report three affected rows.
#[actix_rt::test]
async fn add_user_groups_returns_3_when_user_is_associated_with_3_groups(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("bob", "bob@email.com", "111-222-3344");
let group_names = vec![
randomize_string("engineer"),
randomize_string("architect"),
randomize_string("tester"),
];
let _ = db.users.add_user(&user).await?;
for group_name in group_names.iter() {
let _ = db.roles.add_role(&Realm::default(), group_name, &None).await?;
}
// Re-read each role so the vec carries the DB-assigned ids.
let mut groups = Vec::with_capacity(3);
for group_name in group_names.iter() {
let group = db.roles.get_role_by_name(group_name).await?;
groups.push(group);
}
let result = db.users_to_groups.add_user_groups(&user.id, &groups).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(3, result);
Ok(())
}
// Linking to a Role with id 0 that was never inserted must fail.
#[actix_rt::test]
async fn add_user_groups_returns_err_when_group_does_not_exist(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("charlie", "charlie@email.com", "111-222-3344");
let groups = vec![Role {
id: 0,
max: None,
name: String::from("non-existent"),
}];
let _ = db.users.add_user(&user).await?;
let result = db.users_to_groups.add_user_groups(&user.id, &groups).await;
assert!(result.is_err());
Ok(())
}
// Mirror case: a real role, but a user id that was never inserted.
#[actix_rt::test]
async fn add_user_groups_returns_err_when_user_does_not_exist() -> Result<(), sqlx::Error>
{
let db = init_db_context().await;
let group_name = randomize_string("hackers");
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await?;
let group = db.roles.get_role_by_name(&group_name).await?;
let groups = vec![group];
let result = db
.users_to_groups
.add_user_groups(&Uuid::new_v4().to_string(), &groups)
.await;
assert!(result.is_err());
Ok(())
}
// After linking a user to one role, the lookup returns exactly that role.
#[actix_rt::test]
async fn get_groups_by_user_id_returns_users_groups() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("candice", "candice@email.com", "111-222-3344");
let group = randomize_string("vendor");
// Inner block: the re-bound `group` row stays local; the outer String name
// survives for the assertion below.
{
let _ = db.users.add_user(&user).await?;
let _ = db.roles.add_role(&Realm::default(), &group, &None).await?;
let group = db.roles.get_role_by_name(&group).await?;
let groups = vec![group];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
let result = db.users_to_groups.get_groups_by_user_id(&user.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result.len());
assert_eq!(group, result[0].name);
Ok(())
}
// Unknown user: the lookup is Ok with an empty vec, not an error.
#[actix_rt::test]
async fn get_groups_by_user_id_returns_empty_vec_when_user_does_not_exist() -> () {
let db = init_db_context().await;
let user_id = Uuid::new_v4().to_string();
let result = db.users_to_groups.get_groups_by_user_id(&user_id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result.len());
}
// Deleting links for an unknown user is a no-op: Ok(0), not an error.
#[actix_rt::test]
async fn delete_by_user_id_returns_0_when_user_id_does_not_exist() -> () {
let db = init_db_context().await;
let user_id = Uuid::new_v4().to_string();
let result = db.users_to_groups.delete_by_user_id(&user_id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// One existing user/role link deleted by user id => exactly 1 row reported.
#[actix_rt::test]
async fn delete_by_user_id_returns_number_of_rows_deleted() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("donald", "donald@email.com", "111-222-3344");
let group = randomize_string("customer");
// Setup block: seed user, role, and one link row.
{
let _ = db.users.add_user(&user).await?;
let _ = db.roles.add_role(&Realm::default(), &group, &None).await?;
let group = db.roles.get_role_by_name(&group).await?;
let groups = vec![group];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
let result = db.users_to_groups.delete_by_user_id(&user.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Deleting links for an unknown role id is a no-op: Ok(0).
#[actix_rt::test]
async fn delete_by_group_id_returns_0_when_group_id_does_not_exist() -> () {
let db = init_db_context().await;
let result = db.users_to_groups.delete_by_group_id(0).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(0, result);
}
// One existing link deleted by role id => exactly 1 row reported.
#[actix_rt::test]
async fn delete_by_group_id_returns_number_of_rows_deleted() -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("emily", "emily@email.com", "111-222-3344");
let group = randomize_string("executive");
{
let _ = db.users.add_user(&user).await?;
let _ = db.roles.add_role(&Realm::default(), &group, &None).await?;
}
// The fetched row is cloned into the link call so `group.id` stays usable
// for the delete below.
let group = db.roles.get_role_by_name(&group).await?;
{
let groups = vec![group.clone()];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
let result = db.users_to_groups.delete_by_group_id(group.id).await;
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(1, result);
Ok(())
}
// Updating with an empty `groups` vec removes the existing link (1 row).
#[actix_rt::test]
async fn update_user_groups_deletes_rows_when_users_group_vec_is_empty(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let user = User::new("frank", "frank@email.com", "111-222-3344");
let group_name = randomize_string("faculty");
{
let _ = db.users.add_user(&user).await?;
let _ = db.roles.add_role(&Realm::default(), &group_name, &None).await?;
}
let group = db.roles.get_role_by_name(&group_name).await?;
{
let groups = vec![group.clone()];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
// Assert the function returns 1 modification
let result = db.users_to_groups.update_user_groups(&user).await;
assert!(result.is_ok());
assert_eq!(1, result.unwrap());
// Verify the function has altered the table
let result = db.users_to_groups.get_groups_by_user_id(&user.id).await?;
assert_eq!(0, result.len());
Ok(())
}
// Updating to a two-role vec reports deleted + added rows.
// NOTE(review): this test reuses the "frank" fixture from the test above
// (the user_to_group twin of this file used "george"). Presumably
// `User::new` uniquifies the stored name — confirm; if it does not, these
// two tests can collide on the unique name column.
#[actix_rt::test]
async fn update_user_groups_returns_deleted_plus_added_rows_when_groups_is_not_empty(
) -> Result<(), sqlx::Error> {
let db = init_db_context().await;
let mut user = User::new("frank", "frank@email.com", "111-222-3344");
let group_names = vec![randomize_string("general"), randomize_string("gossiper")];
{
let _ = db.users.add_user(&user).await?;
let _ = db.roles.add_role(&Realm::default(), &group_names[0], &None).await?;
let _ = db.roles.add_role(&Realm::default(), &group_names[1], &None).await?;
}
let groups = vec![
db.roles.get_role_by_name(&group_names[0]).await?,
db.roles.get_role_by_name(&group_names[1]).await?,
];
// Pre-link only the first role so the update has something to delete.
{
let groups = vec![groups[0].clone()];
let _ = db
.users_to_groups
.add_user_groups(&user.id, &groups)
.await?;
}
user.groups = groups;
// Assert 3 modifications: 1 pre-existing link deleted + 2 links added,
// per the test name. (Previous comment said "1 modification" — stale.)
let result = db.users_to_groups.update_user_groups(&user).await;
assert!(result.is_ok());
assert_eq!(3, result.unwrap());
Ok(())
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/tests/dao_test/mod.rs | tests/dao_test/mod.rs | use super::{init_db_context, randomize_string};
// Declare every DAO test module in this directory.
// Fix: `role_dao_test.rs` and `user_to_role_dao_test.rs` exist on disk at
// this commit but were never declared here, so their tests silently never
// compiled or ran. `#[cfg(test)]` is applied uniformly (it was previously
// present on only two of the four declarations).
#[cfg(test)]
mod db_context_test;
#[cfg(test)]
mod group_dao_test;
#[cfg(test)]
mod role_dao_test;
#[cfg(test)]
mod user_dao_test;
#[cfg(test)]
mod user_to_group_dao_test;
#[cfg(test)]
mod user_to_role_dao_test;
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/lib.rs | crates/rust-mcp-extra/src/lib.rs | #[cfg(feature = "auth")]
pub mod auth_provider;
// Adaptors between `http` request/response types and framework-specific ones.
pub mod http_adaptors;
// Session-id generators; individual implementations are feature-gated
// inside the module.
pub mod id_generator;
pub mod sqlite;
#[cfg(feature = "auth")]
pub mod token_verifier;
// Re-export the SDK trait so downstream crates only need this crate in scope.
pub use rust_mcp_sdk::id_generator::IdGenerator;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/http_adaptors.rs | crates/rust-mcp-extra/src/http_adaptors.rs | //! This module provides utility functions for converting between http::Request / http::Response types and framework-specific request/response types.
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/sqlite.rs | crates/rust-mcp-extra/src/sqlite.rs | mod sqlite_event_store;
mod sqlite_session_store;
// Both submodules are placeholders today; `allow(unused)` keeps the glob
// re-exports warning-free until the stores are actually implemented.
#[allow(unused)]
pub use sqlite_event_store::*;
#[allow(unused)]
pub use sqlite_session_store::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/auth_provider.rs | crates/rust-mcp-extra/src/auth_provider.rs | pub mod keycloak;
pub mod scalekit;
pub mod work_os;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/token_verifier.rs | crates/rust-mcp-extra/src/token_verifier.rs | mod generic_token_verifier;
mod jwt_cache;
pub use generic_token_verifier::*;
pub use jwt_cache::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/id_generator.rs | crates/rust-mcp-extra/src/id_generator.rs | //! This module provides implementations of various ID generators,
//! which can be used for generating `session_id`s in MCP servers.
#[cfg(feature = "nano_id")]
mod nano_id_generator;
#[cfg(feature = "random_62_id")]
mod random_base_62_id_generator;
#[cfg(feature = "snowflake_id")]
mod snow_flake_id_generator;
#[cfg(feature = "time_64_id")]
mod time_base_64_id_generator;
#[cfg(feature = "nano_id")]
pub use nano_id_generator::*;
#[cfg(feature = "random_62_id")]
pub use random_base_62_id_generator::*;
#[cfg(feature = "snowflake_id")]
pub use snow_flake_id_generator::*;
#[cfg(feature = "time_64_id")]
pub use time_base_64_id_generator::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/sqlite/sqlite_event_store.rs | crates/rust-mcp-extra/src/sqlite/sqlite_event_store.rs | //! This module serves as a placeholder for implementing a SQLite-backed event store.
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/sqlite/sqlite_session_store.rs | crates/rust-mcp-extra/src/sqlite/sqlite_session_store.rs | //! This module serves as a placeholder for implementing a SQLite-backed session store.
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/id_generator/random_base_62_id_generator.rs | crates/rust-mcp-extra/src/id_generator/random_base_62_id_generator.rs | //! Short, URL-safe, No collisions if length is sufficient
//! Needs collision handling if critical
use rand::Rng;
use rand_distr::Alphanumeric;
use rust_mcp_sdk::id_generator::IdGenerator;
/// Generator for fixed-length random Base62 identifiers.
///
/// IDs are drawn from the 62-character alphanumeric alphabet
/// (`A-Z`, `a-z`, `0-9`), which makes them compact and URL-safe. No
/// timestamp is embedded, so the IDs carry no ordering information, and
/// uniqueness is only probabilistic (choose a sufficient length).
///
/// # Example
/// ```
/// use rust_mcp_extra::{id_generator::RandomBase62Generator,IdGenerator};
///
/// let generator = RandomBase62Generator::new(12);
/// let id: String = generator.generate();
/// println!("Generated Base62 ID: {}", id);
/// ```
pub struct RandomBase62Generator {
    // Number of characters in every generated ID.
    size: usize,
}
impl RandomBase62Generator {
    /// Builds a generator whose IDs are exactly `size` characters long.
    pub fn new(size: usize) -> Self {
        RandomBase62Generator { size }
    }
}
impl<T> IdGenerator<T> for RandomBase62Generator
where
    T: From<String>,
{
    /// Generates a new random Base62 ID string.
    ///
    /// The ID consists of randomly selected alphanumeric characters (A-Z, a-z, 0-9).
    /// Each call is independent; collisions are possible and become likely for
    /// short `size` values (birthday bound), so size the ID accordingly.
    fn generate(&self) -> T {
        // `Alphanumeric` yields u8 code points from the 62-character alphabet;
        // `char::from` turns each byte into a `char` before collecting.
        let id: String = rand::rng()
            .sample_iter(&Alphanumeric)
            .take(self.size)
            .map(char::from)
            .collect();
        T::from(id)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Output length must equal the configured size.
    #[test]
    fn generates_non_empty_id() {
        let generator = RandomBase62Generator::new(16);
        let id: String = generator.generate();
        assert_eq!(id.len(), 16);
        assert!(!id.is_empty());
    }
    // Probabilistic uniqueness check: 1000 8-char IDs should not collide.
    #[test]
    fn generates_unique_ids() {
        let generator = RandomBase62Generator::new(8);
        let mut seen = std::collections::HashSet::new();
        for _ in 0..1000 {
            let id: String = generator.generate();
            assert!(seen.insert(id), "Duplicate ID generated");
        }
    }
    // The alphabet is strictly [A-Za-z0-9].
    #[test]
    fn only_alphanumeric_characters() {
        let generator = RandomBase62Generator::new(50);
        let id: String = generator.generate();
        assert!(
            id.chars().all(|c| c.is_ascii_alphanumeric()),
            "ID contains non-alphanumeric chars"
        );
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/id_generator/nano_id_generator.rs | crates/rust-mcp-extra/src/id_generator/nano_id_generator.rs | //! Short (Smaller than UUID), URL-safe, Customizable alphabet, Cryptographically secure
use nanoid::nanoid;
use rust_mcp_sdk::id_generator::IdGenerator;
/// ID generator backed by the `nanoid` crate.
///
/// Produces short, URL-safe, cryptographically random identifiers.
/// A good fit when:
/// - UUIDs feel too long or noisy,
/// - ordering/time information is not required,
/// - compact, human-friendly IDs are preferred.
///
/// # Example
/// ```
/// use rust_mcp_extra::{id_generator::NanoIdGenerator,IdGenerator};
///
/// let generator = NanoIdGenerator::new(10);
/// let id: String = generator.generate();
/// println!("Generated ID: {}", id);
/// assert_eq!(id.len(), 10);
/// ```
pub struct NanoIdGenerator {
    /// Number of characters in each generated ID.
    size: usize,
}
impl NanoIdGenerator {
    /// Builds a generator producing IDs of `size` characters
    /// (21 is the conventional NanoID default if in doubt).
    pub fn new(size: usize) -> Self {
        NanoIdGenerator { size }
    }
}
impl<T> IdGenerator<T> for NanoIdGenerator
where
    T: From<String>,
{
    /// Generates a random NanoID of the configured length.
    fn generate(&self) -> T {
        let size = self.size;
        // `nanoid!` takes the length; the alphabet is the crate default.
        let id = nanoid!(size);
        T::from(id)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Output length equals the configured size.
    #[test]
    fn generates_correct_length_id() {
        let generator = NanoIdGenerator::new(12);
        let id: String = generator.generate();
        assert_eq!(id.len(), 12);
    }
    // Probabilistic uniqueness: 1000 8-char IDs should not collide.
    #[test]
    fn generates_unique_ids() {
        let generator = NanoIdGenerator::new(8);
        let mut seen = std::collections::HashSet::new();
        for _ in 0..1000 {
            let id: String = generator.generate();
            assert!(seen.insert(id.clone()), "Duplicate ID: {id}");
        }
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/id_generator/snow_flake_id_generator.rs | crates/rust-mcp-extra/src/id_generator/snow_flake_id_generator.rs | //! Medium size ,Globally unique , Time-sortable , Compact (64 bits),
//! Use case: Distributed systems needing high-throughput, unique IDs without collisions.
//! [ timestamp (41 bits) | machine id (10 bits) | sequence (12 bits) ]
use once_cell::sync::Lazy;
use rust_mcp_sdk::id_generator::IdGenerator;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};
/// Epoch (customizable to reduce total bits needed)
///
/// NOTE(review): this lazily captures "now" at first use, so the epoch is
/// effectively *process start time*. Timestamps therefore restart near 0 on
/// every process restart, which breaks cross-restart monotonicity and can
/// allow collisions between runs — confirm whether a fixed, compile-time
/// epoch constant is intended instead.
static SHORTER_EPOCH: Lazy<u64> = Lazy::new(|| {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("invalid system time!")
        .as_millis() as u64
});
/// A Snowflake ID generator implementation producing 64-bit unique IDs.
///
/// Snowflake IDs are composed of:
/// - A timestamp in milliseconds since a custom epoch (usually a fixed past time),
/// - A machine ID (or worker ID) to differentiate between nodes,
/// - A sequence number that increments within the same millisecond to avoid collisions.
///
/// Format (64 bits total):
/// - 41 bits: timestamp (ms since SHORTER_EPOCH)
/// - 10 bits: machine ID (0-1023)
/// - 12 bits: sequence number (per ms)
///
/// This generator ensures:
/// - Uniqueness across multiple machines (given unique machine IDs),
/// - Monotonic increasing IDs when generated in the same process.
///
/// State is kept in lock-free atomics, not a lock; see the NOTE on
/// `next_id` before relying on uniqueness under concurrent callers.
pub struct SnowflakeIdGenerator {
    machine_id: u16, // 10 bits max
    /// Timestamp (ms relative to SHORTER_EPOCH) of the last issued ID.
    last_timestamp: AtomicU64,
    /// Per-millisecond counter, masked to 12 bits when composing the ID.
    sequence: AtomicU64,
}
impl SnowflakeIdGenerator {
    /// Creates a generator for the given node.
    ///
    /// # Panics
    /// Panics when `machine_id` does not fit in 10 bits (>= 1024).
    pub fn new(machine_id: u16) -> Self {
        assert!(
            machine_id < 1024,
            "Machine ID must be less than 1024 (10 bits)"
        );
        SnowflakeIdGenerator {
            machine_id,
            last_timestamp: AtomicU64::new(0),
            sequence: AtomicU64::new(0),
        }
    }
    /// Milliseconds elapsed since `SHORTER_EPOCH`, saturating at 0 if the
    /// clock reports a time before the epoch.
    fn current_timestamp(&self) -> u64 {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("invalid system time!")
            .as_millis() as u64;
        now.saturating_sub(*SHORTER_EPOCH)
    }
    /// Composes the next 64-bit ID from timestamp, machine id and sequence.
    ///
    /// NOTE(review): the `load` of `last_timestamp` and the `fetch_add` on
    /// `sequence` are separate atomic operations, not one atomic unit, so two
    /// threads racing through this method may observe the same
    /// (timestamp, sequence) pair. Uniqueness looks guaranteed only for
    /// single-threaded use — confirm callers, or guard with a lock/CAS loop.
    fn next_id(&self) -> u64 {
        let mut timestamp = self.current_timestamp();
        let last_ts = self.last_timestamp.load(Ordering::Relaxed);
        let sequence = if timestamp == last_ts {
            // same millisecond - increment sequence
            let seq = self.sequence.fetch_add(1, Ordering::Relaxed) & 0xFFF; // 12 bits
            if seq == 0 {
                // Sequence overflow - wait for next ms
                while timestamp <= last_ts {
                    timestamp = self.current_timestamp();
                }
                self.sequence.store(0, Ordering::Relaxed);
                self.last_timestamp.store(timestamp, Ordering::Relaxed);
                0
            } else {
                seq
            }
        } else {
            // new timestamp (this branch is also taken if the clock stepped
            // backwards, i.e. timestamp < last_ts — the sequence restarts)
            self.sequence.store(0, Ordering::Relaxed);
            self.last_timestamp.store(timestamp, Ordering::Relaxed);
            0
        };
        // Compose ID: [timestamp][machine_id][sequence]
        ((timestamp & 0x1FFFFFFFFFF) << 22) // 41 bits
            | ((self.machine_id as u64 & 0x3FF) << 12) // 10 bits
            | (sequence & 0xFFF) // 12 bits
    }
}
impl<T> IdGenerator<T> for SnowflakeIdGenerator
where
    T: From<String>,
{
    /// Produces the next Snowflake ID, rendered as a decimal string.
    fn generate(&self) -> T {
        // Decimal rendering keeps the string ordering meaningful; a
        // base62/base64 encoding could shorten it if ever needed.
        T::from(self.next_id().to_string())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: an ID is produced at all.
    #[test]
    fn generates_id() {
        let generator = SnowflakeIdGenerator::new(1);
        let id: String = generator.generate();
        assert!(!id.is_empty(), "Generated ID should not be empty");
    }
    // 1000 sequential IDs from one generator must all differ.
    #[test]
    fn generates_unique_ids() {
        let generator = SnowflakeIdGenerator::new(1);
        let mut ids = std::collections::HashSet::new();
        for _ in 0..1000 {
            let id: String = generator.generate();
            assert!(ids.insert(id), "Duplicate ID generated");
        }
    }
    // Within one process, the numeric IDs must be strictly increasing.
    #[test]
    fn ids_are_monotonic_increasing() {
        let generator = SnowflakeIdGenerator::new(1);
        let mut prev_id = 0u64;
        for _ in 0..1000 {
            let id: String = generator.generate();
            let current_id: u64 = id.parse().expect("ID should be a valid u64");
            assert!(
                current_id > prev_id,
                "ID not strictly increasing: {current_id} <= {prev_id}"
            );
            prev_id = current_id;
        }
    }
    // Rapid generation exercises the 12-bit sequence-overflow wait path.
    #[test]
    fn handles_sequence_rollover() {
        // Try to simulate a sequence rollover by generating many IDs quickly
        // just ensuring it doesn't panic
        let generator = SnowflakeIdGenerator::new(1);
        for _ in 0..2000 {
            let _id: String = generator.generate();
        }
    }
    // 1023 is the largest valid 10-bit machine ID.
    #[test]
    fn respects_machine_id_limit() {
        // Valid machine ID
        let _ = SnowflakeIdGenerator::new(1023);
    }
    // 1024 overflows 10 bits and must be rejected by the constructor.
    #[test]
    #[should_panic(expected = "Machine ID must be less than 1024")]
    fn rejects_invalid_machine_id() {
        // Invalid machine ID (greater than 1023)
        let _ = SnowflakeIdGenerator::new(1024);
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/id_generator/time_base_64_id_generator.rs | crates/rust-mcp-extra/src/id_generator/time_base_64_id_generator.rs | //! Short, Fast, Sortable, Shorter than UUID
//! Not globally unique
use base64::engine::general_purpose;
use base64::Engine;
use rust_mcp_sdk::id_generator::IdGenerator;
use std::time::{SystemTime, UNIX_EPOCH};
/// Generator for timestamp-derived, Base64-encoded identifiers.
///
/// Each ID is the current UNIX time in milliseconds, encoded as URL-safe
/// Base64 without padding. An optional static prefix can be prepended for
/// readability or namespacing. IDs generated in the same millisecond are
/// identical, so these are sortable but not unique.
///
/// # Example
/// ```
/// use rust_mcp_extra::{id_generator::TimeBase64Generator, IdGenerator};
///
/// let generator = TimeBase64Generator::new(Some("ts_"));
/// let id: String = generator.generate();
/// println!("Generated time-based ID: {}", id);
/// ```
pub struct TimeBase64Generator {
    /// Static text prepended to every ID; empty when no prefix was given.
    prefix: &'static str,
}
impl TimeBase64Generator {
    /// Builds a generator, treating `None` as the empty prefix.
    pub fn new(prefix: Option<&'static str>) -> Self {
        let prefix = prefix.unwrap_or("");
        TimeBase64Generator { prefix }
    }
    /// Milliseconds elapsed since the UNIX epoch.
    ///
    /// Panics only if the system clock reports a time before 1970.
    fn current_millis() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("invalid system time!")
            .as_millis() as u64
    }
}
impl<T> IdGenerator<T> for TimeBase64Generator
where
    T: From<String>,
{
    /// Generates a new time-based Base64 ID.
    ///
    /// The ID is the current millisecond timestamp, little-endian encoded
    /// and rendered as URL-safe Base64 without padding, with the configured
    /// prefix (if any) in front.
    fn generate(&self) -> T {
        let encoded =
            general_purpose::URL_SAFE_NO_PAD.encode(Self::current_millis().to_le_bytes());
        let id = if self.prefix.is_empty() {
            encoded
        } else {
            format!("{}{}", self.prefix, encoded)
        };
        T::from(id)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn generates_non_empty_id() {
        let generator = TimeBase64Generator::new(None);
        let id: String = generator.generate();
        assert!(!id.is_empty(), "ID should not be empty");
    }
    #[test]
    fn generates_id_with_prefix() {
        let prefix = "ts_";
        let generator = TimeBase64Generator::new(Some(prefix));
        let id: String = generator.generate();
        assert!(id.starts_with(prefix), "ID should start with prefix");
    }
    // Millisecond resolution: a 2 ms sleep guarantees a new timestamp.
    #[test]
    fn ids_change_over_time() {
        let generator = TimeBase64Generator::new(None);
        let id1: String = generator.generate();
        std::thread::sleep(std::time::Duration::from_millis(2));
        let id2: String = generator.generate();
        assert_ne!(id1, id2, "IDs generated at different times should differ");
    }
    // Round-trip: an un-prefixed ID decodes back to the source timestamp.
    #[test]
    fn base64_decodes_to_timestamp() {
        let generator = TimeBase64Generator::new(None);
        let id: String = generator.generate();
        // Decode the base64 (without prefix)
        let decoded = base64::engine::general_purpose::URL_SAFE_NO_PAD
            .decode(&id)
            .expect("Should decode base64");
        // Convert bytes back to u64 timestamp
        let timestamp = u64::from_le_bytes(decoded.try_into().unwrap());
        assert!(timestamp > 0, "Timestamp should be positive");
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/token_verifier/jwt_cache.rs | crates/rust-mcp-extra/src/token_verifier/jwt_cache.rs | use std::collections::{HashMap, VecDeque};
use std::time::{Duration, Instant};
/// JWT introspection cache with a per-entry TTL and bounded capacity.
///
/// Tracks, per token key, when the token was last verified remotely so
/// callers can skip re-verification while the entry is still fresh.
/// Entries are evicted FIFO (by first-insertion order) once `capacity`
/// is reached; expired entries are purged on every `record` call.
pub struct JwtCache {
    map: HashMap<String, Instant>, // Key -> last introspection time
    order: VecDeque<String>,       // Keys in insertion order
    remote_verification_interval: Duration,
    capacity: usize,
}
impl JwtCache {
    /// Create a new cache with given TTL and capacity.
    ///
    /// NOTE(review): a `capacity` of 0 is not rejected; `record` will still
    /// insert one entry in that case — confirm callers never pass 0.
    pub fn new(remote_verification_interval: Duration, capacity: usize) -> Self {
        Self {
            map: HashMap::with_capacity(capacity),
            order: VecDeque::with_capacity(capacity),
            remote_verification_interval,
            capacity,
        }
    }
    /// Returns true when `key` was recorded within the TTL window.
    pub fn is_recent(&self, key: &str) -> bool {
        self.map
            .get(key)
            .is_some_and(|t| t.elapsed() <= self.remote_verification_interval)
    }
    /// Records a verification for `key`: refreshes the timestamp of an
    /// existing entry in place, or inserts a new one (evicting the oldest
    /// when at capacity). Expired entries are purged first.
    pub fn record(&mut self, key: String) {
        // Remove expired entries first so they don't count toward capacity.
        self.remove_expired();
        if let Some(ts) = self.map.get_mut(&key) {
            // Refresh in place (single lookup, no key clone);
            // `order` keeps the original insertion slot — no promotion.
            *ts = Instant::now();
        } else {
            // Evict oldest if over capacity.
            if self.map.len() >= self.capacity {
                if let Some(oldest) = self.order.pop_front() {
                    self.map.remove(&oldest);
                }
            }
            self.map.insert(key.clone(), Instant::now());
            self.order.push_back(key);
        }
    }
    /// Remove every entry whose last verification is older than the TTL.
    pub fn remove_expired(&mut self) {
        let ttl = self.remote_verification_interval;
        // Disjoint field borrows: retain on `order` while mutating `map`.
        // Single O(n) pass instead of collecting expired keys and calling
        // `order.retain` once per expired key (previously O(n·m)).
        let map = &mut self.map;
        self.order.retain(|key| match map.get(key) {
            Some(last) if last.elapsed() <= ttl => true,
            _ => {
                map.remove(key);
                false
            }
        });
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/token_verifier/generic_token_verifier.rs | crates/rust-mcp-extra/src/token_verifier/generic_token_verifier.rs | use crate::token_verifier::jwt_cache::JwtCache;
use async_lock::RwLock;
use async_trait::async_trait;
use reqwest::{header::AUTHORIZATION, StatusCode};
use rust_mcp_sdk::{
auth::{
decode_token_header, Audience, AuthInfo, AuthenticationError, IntrospectionResponse,
JsonWebKeySet, OauthTokenVerifier,
},
mcp_http::error_message_from_response,
};
use serde_json::Value;
use std::{
collections::HashMap,
time::{Duration, SystemTime},
};
use url::Url;
const JWKS_REFRESH_TIME: Duration = Duration::from_secs(24 * 60 * 60); // re-fetch jwks every 24 hours
const REMOTE_VERIFICATION_INTERVAL: Duration = Duration::from_secs(15 * 60); // 15 minutes
// Default number of distinct tokens tracked by the introspection cache.
const JWT_CACHE_CAPACITY: usize = 1000;
/// Cached JWKS document together with its fetch time; entries older than
/// `JWKS_REFRESH_TIME` are re-fetched before use.
struct JwksCache {
    last_updated: Option<SystemTime>,
    jwks: JsonWebKeySet,
}
/// Supported OAuth token verification strategies.
///
/// Each variant represents a different method for validating access tokens,
/// depending on what the authorization server exposes or what your application
/// requires.
pub enum VerificationStrategies {
    /// Verifies tokens by calling the authorization server's introspection
    /// endpoint, as defined in RFC 7662.
    ///
    /// This method allows the resource server to validate opaque or JWT tokens
    /// by sending them to the introspection URI along with its client credentials.
    Introspection {
        /// The OAuth introspection endpoint.
        introspection_uri: String,
        /// Client identifier used to authenticate the introspection request.
        client_id: String,
        /// Client secret used to authenticate the introspection request.
        client_secret: String,
        /// Indicates whether the OAuth2 client should use HTTP Basic Authentication when
        ///calling the token introspection endpoint.
        /// if false: client_id and client_secret will be sent in the POST body instead of using Basic Authentication
        use_basic_auth: bool,
        /// Optional key-value pairs to include as additional parameters in the
        /// body of the token introspection request.
        /// Example : ("token_type_hint", "access_token")
        extra_params: Option<Vec<(&'static str, &'static str)>>,
    },
    /// Verifies JWT access tokens using the authorization server’s JSON Web Key
    /// Set (JWKS) endpoint.
    ///
    /// This strategy allows fully offline signature validation after retrieving
    /// the key set, making it efficient for high-throughput services.
    JWKs {
        /// The JWKS endpoint URL used to retrieve signing keys.
        jwks_uri: String,
    },
    /// Verifies tokens by querying the OpenID Connect UserInfo endpoint.
    ///
    /// This strategy is typically used when token validity is tied to the user's
    /// profile information or when the resource server relies on OIDC user data
    /// for validation.
    UserInfo {
        /// The OIDC UserInfo endpoint URL.
        userinfo_uri: String,
    },
}
/// Options for configuring a token verifier.
///
/// `TokenVerifierOptions` allows specifying one or more strategies for verifying
/// OAuth access tokens. Multiple strategies can be provided; the verifier will
/// attempt them in order until one succeeds or all fail.
pub struct TokenVerifierOptions {
    /// The list of token verification strategies to use.
    /// Each strategy defines a different method for validating tokens, such as
    /// introspection, JWKS signature validation, or querying the UserInfo endpoint.
    /// For optimal performance, it is recommended to include JWKS alongside either introspection or UserInfo.
    pub strategies: Vec<VerificationStrategies>,
    /// Optional audience value to validate against the token's `aud` claim.
    pub validate_audience: Option<Audience>,
    /// Optional issuer value to validate against the token's `iss` claim.
    pub validate_issuer: Option<String>,
    /// Optional capacity for the internal cache, used to reduce unnecessary requests during verification.
    pub cache_capacity: Option<usize>,
}
/// Internal, flattened view of the configured strategies, produced by
/// `TokenVerifierOptions::unpack`.
#[derive(Default, Debug)]
struct StrategiesOptions {
    pub introspection_uri: Option<Url>,
    pub introspection_basic_auth: bool,
    pub introspect_extra_params: Option<Vec<(&'static str, &'static str)>>,
    pub client_id: Option<String>,
    pub client_secret: Option<String>,
    pub jwks_uri: Option<Url>,
    pub userinfo_uri: Option<Url>,
}
impl TokenVerifierOptions {
    /// Flattens the configured strategies into a single `StrategiesOptions`.
    ///
    /// Drains `self.strategies`. The returned flag is true exactly when a
    /// JWKS strategy is combined with at least one remote strategy
    /// (introspection or userinfo) — the situation in which caching remote
    /// verification results pays off.
    ///
    /// # Errors
    /// `AuthenticationError::ParsingError` when any strategy URI is invalid.
    fn unpack(&mut self) -> Result<(StrategiesOptions, bool), AuthenticationError> {
        let mut opts = StrategiesOptions::default();
        let mut has_jwks = false;
        let mut has_remote = false;
        for strategy in self.strategies.drain(..) {
            match strategy {
                VerificationStrategies::Introspection {
                    introspection_uri,
                    client_id,
                    client_secret,
                    use_basic_auth,
                    extra_params,
                } => {
                    let parsed = Url::parse(&introspection_uri).map_err(|err| {
                        AuthenticationError::ParsingError(format!(
                            "Invalid introspection uri: {err}"
                        ))
                    })?;
                    opts.introspection_uri = Some(parsed);
                    opts.client_id = Some(client_id);
                    opts.client_secret = Some(client_secret);
                    opts.introspection_basic_auth = use_basic_auth;
                    opts.introspect_extra_params = extra_params;
                    has_remote = true;
                }
                VerificationStrategies::JWKs { jwks_uri } => {
                    let parsed = Url::parse(&jwks_uri).map_err(|err| {
                        AuthenticationError::ParsingError(format!("Invalid jwks uri: {err}"))
                    })?;
                    opts.jwks_uri = Some(parsed);
                    has_jwks = true;
                }
                VerificationStrategies::UserInfo { userinfo_uri } => {
                    let parsed = Url::parse(&userinfo_uri).map_err(|err| {
                        AuthenticationError::ParsingError(format!("Invalid userinfo uri: {err}"))
                    })?;
                    opts.userinfo_uri = Some(parsed);
                    has_remote = true;
                }
            }
        }
        Ok((opts, has_jwks && has_remote))
    }
}
/// Token verifier that combines local JWKS signature validation with optional
/// remote verification (RFC 7662 introspection and/or the OIDC UserInfo
/// endpoint). Construct via [`GenericOauthTokenVerifier::new`].
pub struct GenericOauthTokenVerifier {
    /// Optional audience value to validate against the token's `aud` claim.
    validate_audience: Option<Audience>,
    /// Optional issuer value to validate against the token's `iss` claim.
    validate_issuer: Option<String>,
    /// Throttles remote verification; only allocated when JWKS is combined
    /// with a remote strategy (see `new`).
    jwt_cache: Option<RwLock<JwtCache>>,
    /// Lazily fetched JWKS document, refreshed after `JWKS_REFRESH_TIME`.
    json_web_key_set: RwLock<Option<JwksCache>>,
    introspection_uri: Option<Url>,
    /// When true, introspection credentials go in an HTTP Basic header
    /// instead of the form body.
    introspection_basic_auth: bool,
    introspect_extra_params: Option<Vec<(&'static str, &'static str)>>,
    client_id: Option<String>,
    client_secret: Option<String>,
    jwks_uri: Option<Url>,
    userinfo_uri: Option<Url>,
}
impl GenericOauthTokenVerifier {
pub fn new(mut options: TokenVerifierOptions) -> Result<Self, AuthenticationError> {
let (strategy_options, chachable) = options.unpack()?;
let validate_audience = options.validate_audience.take();
let validate_issuer = options
.validate_issuer
.map(|iss| iss.trim_end_matches('/').to_string());
// we only need to cache if both jwks and introspection are supported
let jwt_cache = if chachable {
Some(RwLock::new(JwtCache::new(
REMOTE_VERIFICATION_INTERVAL,
options.cache_capacity.unwrap_or(JWT_CACHE_CAPACITY),
)))
} else {
None
};
Ok(Self {
validate_issuer,
validate_audience,
jwt_cache,
json_web_key_set: RwLock::new(None),
introspection_uri: strategy_options.introspection_uri,
introspection_basic_auth: strategy_options.introspection_basic_auth,
introspect_extra_params: strategy_options.introspect_extra_params,
client_id: strategy_options.client_id,
client_secret: strategy_options.client_secret,
jwks_uri: strategy_options.jwks_uri,
userinfo_uri: strategy_options.userinfo_uri,
})
}
async fn verify_user_info(
&self,
token: &str,
token_unique_id: Option<&str>,
user_info_endpoint: &Url,
) -> Result<AuthInfo, AuthenticationError> {
// use token_unique_id or get from token header
let token_unique_id = match token_unique_id {
Some(id) => id.to_owned(),
None => {
let header = decode_token_header(token)?;
header.kid.unwrap_or(token.to_string()).to_owned()
}
};
let client = reqwest::Client::new();
let response = client
.get(user_info_endpoint.to_owned())
.header(AUTHORIZATION, format!("Bearer {token}"))
.send()
.await
.map_err(|err| AuthenticationError::Jwks(err.to_string()))?;
let status_code = response.status();
if !response.status().is_success() {
return Err(AuthenticationError::TokenVerificationFailed {
description: error_message_from_response(response, "Unauthorized!").await,
status_code: Some(status_code.as_u16()),
});
}
let json: Value = response.json().await.unwrap();
let extra = match json {
Value::Object(map) => Some(map),
_ => None,
};
let auth_info: AuthInfo = AuthInfo {
token_unique_id,
client_id: None,
user_id: None,
scopes: None,
expires_at: None,
audience: None,
extra,
};
Ok(auth_info)
}
async fn verify_introspection(
&self,
token: &str,
introspection_endpoint: &Url,
) -> Result<AuthInfo, AuthenticationError> {
let client = reqwest::Client::new();
// Form data body
let mut form = HashMap::new();
form.insert("token", token);
if !self.introspection_basic_auth {
if let Some(client_id) = self.client_id.as_ref() {
form.insert("client_id", client_id);
};
if let Some(client_secret) = self.client_secret.as_ref() {
form.insert("client_secret", client_secret);
};
}
if let Some(extra_params) = self.introspect_extra_params.as_ref() {
extra_params.iter().for_each(|(key, value)| {
form.insert(key, value);
});
}
let mut request = client.post(introspection_endpoint.to_owned()).form(&form);
if self.introspection_basic_auth {
request = request.basic_auth(
self.client_id.clone().unwrap_or_default(),
self.client_secret.clone(),
);
}
let response = request
.send()
.await
.map_err(|err| AuthenticationError::Jwks(err.to_string()))?;
let status_code = response.status();
if !response.status().is_success() {
let description = response.text().await.unwrap_or("Unauthorized!".to_string());
return Err(AuthenticationError::TokenVerificationFailed {
description,
status_code: Some(status_code.as_u16()),
});
}
let introspect_response: IntrospectionResponse = response
.json()
.await
.map_err(|err| AuthenticationError::Jwks(err.to_string()))?;
if !introspect_response.active {
return Err(AuthenticationError::InactiveToken);
}
if let Some(validate_audience) = self.validate_audience.as_ref() {
let Some(token_audience) = introspect_response.audience.as_ref() else {
return Err(AuthenticationError::InvalidToken {
description: "Audience attribute (aud) is missing.",
});
};
if token_audience != validate_audience {
return Err(AuthenticationError::TokenVerificationFailed { description:
format!("None of the provided audiences are allowed. Expected ${validate_audience}, got: ${token_audience}")
, status_code: Some(StatusCode::UNAUTHORIZED.as_u16())
});
}
}
if let Some(validate_issuer) = self.validate_issuer.as_ref() {
let Some(token_issuer) = introspect_response.issuer.as_ref() else {
return Err(AuthenticationError::InvalidToken {
description: "Issuer (iss) is missing.",
});
};
if token_issuer != validate_issuer {
return Err(AuthenticationError::TokenVerificationFailed {
description: format!(
"Issuer is not allowed. Expected ${validate_issuer}, got: ${token_issuer}"
),
status_code: Some(StatusCode::UNAUTHORIZED.as_u16()),
});
}
}
AuthInfo::from_introspection_response(token.to_owned(), introspect_response, None)
}
async fn populate_jwks(&self, jwks_uri: &Url) -> Result<(), AuthenticationError> {
let response = reqwest::get(jwks_uri.to_owned())
.await
.map_err(|err| AuthenticationError::Jwks(err.to_string()))?;
let jwks: JsonWebKeySet = response
.json()
.await
.map_err(|err| AuthenticationError::Jwks(err.to_string()))?;
let mut guard = self.json_web_key_set.write().await;
*guard = Some(JwksCache {
last_updated: Some(SystemTime::now()),
jwks,
});
Ok(())
}
async fn verify_jwks(&self, token: &str, jwks: &Url) -> Result<AuthInfo, AuthenticationError> {
// read-modify-write pattern
{
let guard = self.json_web_key_set.read().await;
if let Some(cache) = guard.as_ref() {
if let Some(last_updated) = cache.last_updated {
if SystemTime::now()
.duration_since(last_updated)
.unwrap_or(Duration::from_secs(0))
< JWKS_REFRESH_TIME
{
let token_info = cache.jwks.verify(
token.to_string(),
self.validate_audience.as_ref(),
self.validate_issuer.as_ref(),
)?;
return AuthInfo::from_token_data(token.to_owned(), token_info, None);
}
}
}
}
// Refresh JWKS if cache is invalid or missing
self.populate_jwks(jwks).await?;
// Proceed with verification
let guard = self.json_web_key_set.read().await;
if let Some(cache) = guard.as_ref() {
let token_info = cache.jwks.verify(
token.to_string(),
self.validate_audience.as_ref(),
self.validate_issuer.as_ref(),
)?;
AuthInfo::from_token_data(token.to_owned(), token_info, None)
} else {
Err(AuthenticationError::Jwks(
"Failed to retrieve or parse JWKS".to_string(),
))
}
}
}
#[async_trait]
impl OauthTokenVerifier for GenericOauthTokenVerifier {
    /// Verifies `access_token` using the configured strategies, in order of
    /// preference: local JWKS validation first, then introspection, then the
    /// UserInfo endpoint.
    ///
    /// When JWKS is combined with a remote strategy, the remote check is
    /// throttled through `jwt_cache`: a token whose remote verification is
    /// still recent is accepted on the local JWKS result alone.
    ///
    /// NOTE(review): when JWKS verification fails, the error propagates
    /// immediately — there is no fallback to introspection/userinfo even if
    /// they are configured. Confirm this precedence is intended.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError> {
        // perform local jwks verification if supported
        if let Some(jwks_endpoint) = self.jwks_uri.as_ref() {
            let mut auth_info = self.verify_jwks(&access_token, jwks_endpoint).await?;
            // perform remote verification only if it is supported and jwt is stale
            if let Some(jwt_cache) = self.jwt_cache.as_ref() {
                // return auth_info if it is recent
                if jwt_cache.read().await.is_recent(&auth_info.token_unique_id) {
                    return Ok(auth_info);
                }
                // introspection validation if introspection_uri is provided
                if let Some(introspection_endpoint) = self.introspection_uri.as_ref() {
                    let fresh_auth_info = self
                        .verify_introspection(&access_token, introspection_endpoint)
                        .await?;
                    jwt_cache
                        .write()
                        .await
                        .record(fresh_auth_info.token_unique_id.to_owned());
                    return Ok(fresh_auth_info);
                }
                // call userInfo endpoint only if introspect strategy is not used;
                // the userinfo claims are merged into the JWKS-derived auth_info
                if let Some(user_info_endpoint) = self.userinfo_uri.as_ref() {
                    let fresh_auth_info = self
                        .verify_user_info(
                            &access_token,
                            Some(&auth_info.token_unique_id),
                            user_info_endpoint,
                        )
                        .await?;
                    auth_info.extra = fresh_auth_info.extra;
                    jwt_cache
                        .write()
                        .await
                        .record(auth_info.token_unique_id.to_owned());
                    return Ok(auth_info);
                }
            }
            return Ok(auth_info);
        }
        // use introspection if jwks is not supported, no caching
        if let Some(introspection_endpoint) = self.introspection_uri.as_ref() {
            let auth_info = self
                .verify_introspection(&access_token, introspection_endpoint)
                .await?;
            return Ok(auth_info);
        }
        // use userInfo endpoint if introspect strategy is not used
        if let Some(user_info_endpoint) = self.userinfo_uri.as_ref() {
            let auth_info = self
                .verify_user_info(&access_token, None, user_info_endpoint)
                .await?;
            return Ok(auth_info);
        }
        Err(AuthenticationError::InvalidToken {
            description: "Invalid token verification strategy!",
        })
    }
}
#[cfg(test)]
mod tests {
use super::*;
use oauth2_test_server::{OAuthTestServer, OauthEndpoints};
use rust_mcp_sdk::auth::*;
use serde_json::json;
async fn token_verifier(
strategies: Vec<VerificationStrategies>,
endpoints: &OauthEndpoints,
audience: Option<Audience>,
) -> GenericOauthTokenVerifier {
let auth_metadata = AuthMetadataBuilder::new("http://127.0.0.1:3000/mcp")
.issuer(&endpoints.oauth_server)
.authorization_servers(vec![&endpoints.oauth_server])
.authorization_endpoint(&endpoints.authorize)
.token_endpoint(&endpoints.token)
.scopes_supported(vec!["openid".to_string()])
.introspection_endpoint(&endpoints.introspect)
.jwks_uri(&endpoints.jwks)
.resource_name("MCP Demo Server".to_string())
.build()
.unwrap();
let meta = &auth_metadata.0;
let token_verifier = GenericOauthTokenVerifier::new(TokenVerifierOptions {
validate_audience: audience,
validate_issuer: Some(meta.issuer.to_string()),
strategies,
cache_capacity: None,
})
.unwrap();
token_verifier
}
#[tokio::test]
async fn test_jwks_strategy() {
let server = OAuthTestServer::start().await;
let client = server.register_client(
json!({ "scope": "openid", "redirect_uris":["http://localhost:8080/callback"]}),
);
let verifier = token_verifier(
vec![VerificationStrategies::JWKs {
jwks_uri: server.endpoints.jwks.clone(),
}],
&server.endpoints,
Some(Audience::Single(client.client_id.clone())),
)
.await;
let token = server.generate_jwt(&client, server.jwt_options().user_id("rustmcp").build());
let auth_info = verifier.verify_token(token).await.unwrap();
assert_eq!(
auth_info.audience.as_ref().unwrap().to_string(),
client.client_id
);
assert_eq!(
auth_info.client_id.as_ref().unwrap().to_string(),
client.client_id
);
assert_eq!(auth_info.user_id.as_ref().unwrap(), "rustmcp");
let scopes = auth_info.scopes.as_ref().unwrap();
assert_eq!(scopes.as_slice(), ["openid"]);
}
// UserInfo-only verification: no audience is validated, and the claims
// returned by the /userinfo endpoint land in `auth_info.extra`.
#[tokio::test]
async fn test_userinfo_strategy() {
    let server = OAuthTestServer::start().await;
    let client = server.register_client(
        json!({ "scope": "openid", "redirect_uris":["http://localhost:8080/callback"]}),
    );
    let verifier = token_verifier(
        vec![VerificationStrategies::UserInfo {
            userinfo_uri: server.endpoints.userinfo.clone(),
        }],
        &server.endpoints,
        None,
    )
    .await;
    let token = server.generate_token(&client, server.jwt_options().user_id("rustmcp").build());
    let auth_info = verifier.verify_token(token.access_token).await.unwrap();
    assert!(auth_info.audience.is_none());
    // The `sub` claim from /userinfo identifies the user.
    assert_eq!(
        auth_info
            .extra
            .unwrap()
            .get("sub")
            .unwrap()
            .as_str()
            .unwrap(),
        "rustmcp"
    );
}
// Introspection-only verification (HTTP Basic auth): the introspection
// response supplies audience, client id, user id and scopes.
#[tokio::test]
async fn test_introspect_strategy() {
    let server = OAuthTestServer::start().await;
    let client = server.register_client(
        json!({ "scope": "openid", "redirect_uris":["http://localhost:8080/callback"]}),
    );
    let verifier = token_verifier(
        vec![VerificationStrategies::Introspection {
            introspection_uri: server.endpoints.introspect.clone(),
            client_id: client.client_id.clone(),
            client_secret: client.client_secret.as_ref().unwrap().clone(),
            use_basic_auth: true,
            extra_params: None,
        }],
        &server.endpoints,
        None,
    )
    .await;
    let token = server.generate_token(&client, server.jwt_options().user_id("rustmcp").build());
    let auth_info = verifier.verify_token(token.access_token).await.unwrap();
    assert_eq!(
        auth_info.audience.as_ref().unwrap().to_string(),
        client.client_id
    );
    assert_eq!(
        auth_info.client_id.as_ref().unwrap().to_string(),
        client.client_id
    );
    assert_eq!(auth_info.user_id.as_ref().unwrap(), "rustmcp");
    let scopes = auth_info.scopes.as_ref().unwrap();
    assert_eq!(scopes.as_slice(), ["openid"]);
}
// Same introspection flow, but credentials are sent in the POST body
// (client_secret_post) instead of the Authorization header.
#[tokio::test]
async fn test_introspect_strategy_with_client_secret_post() {
    let server = OAuthTestServer::start().await;
    let client = server.register_client(
        json!({ "scope": "openid profile", "redirect_uris":["http://localhost:8080/cb"]}),
    );
    let verifier = token_verifier(
        vec![VerificationStrategies::Introspection {
            introspection_uri: server.endpoints.introspect.clone(),
            client_id: client.client_id.clone(),
            client_secret: client.client_secret.as_ref().unwrap().clone(),
            use_basic_auth: false, // <--- POST body instead of Basic Auth
            extra_params: None,
        }],
        &server.endpoints,
        Some(Audience::Single(client.client_id.clone())),
    )
    .await;
    let token = server.generate_token(&client, server.jwt_options().user_id("alice").build());
    let auth_info = verifier.verify_token(token.access_token).await.unwrap();
    assert_eq!(auth_info.user_id.as_ref().unwrap(), "alice");
    assert!(auth_info.scopes.unwrap().contains(&"profile".to_string()));
    assert_eq!(
        auth_info.audience.as_ref().unwrap().to_string(),
        client.client_id
    );
}
// Revoked tokens must be rejected: after revocation, introspection reports
// the token as inactive and verification yields `InactiveToken`.
#[tokio::test]
async fn test_introspect_rejects_inactive_token() {
    let server = OAuthTestServer::start().await;
    let client = server
        .register_client(json!({ "scope": "openid", "redirect_uris": ["http://localhost"] }));
    let verifier = token_verifier(
        vec![VerificationStrategies::Introspection {
            introspection_uri: server.endpoints.introspect.clone(),
            client_id: client.client_id.clone(),
            client_secret: client.client_secret.as_ref().unwrap().clone(),
            use_basic_auth: true,
            extra_params: None,
        }],
        &server.endpoints,
        None,
    )
    .await;
    let token_response =
        server.generate_token(&client, server.jwt_options().user_id("bob").build());
    // Revoke the token before verifying it.
    server
        .revoke_token(&client, &token_response.access_token)
        .await;
    let result = verifier.verify_token(token_response.access_token).await;
    assert!(matches!(result, Err(AuthenticationError::InactiveToken)));
}
// Expired tokens must be rejected by the combined JWKS + introspection
// pipeline, both before and after an explicit revocation.
#[tokio::test]
async fn test_expired_token_rejected_by_jwks_and_introspection() {
    let server = OAuthTestServer::start().await;
    let client = server.register_client(
        json!({ "scope": "openid email", "redirect_uris": ["http://localhost"] }),
    );
    // Use both strategies → expect rejection on expiration alone
    let verifier = token_verifier(
        vec![
            VerificationStrategies::JWKs {
                jwks_uri: server.endpoints.jwks.clone(),
            },
            VerificationStrategies::Introspection {
                introspection_uri: server.endpoints.introspect.clone(),
                client_id: client.client_id.clone(),
                client_secret: client.client_secret.as_ref().unwrap().clone(),
                use_basic_auth: true,
                extra_params: None,
            },
        ],
        &server.endpoints,
        Some(Audience::Single(client.client_id.clone())),
    )
    .await;
    // Generate short-lived token
    let short_lived = server
        .jwt_options()
        .user_id("charlie")
        .expires_in(1)
        .build();
    let token = server.generate_token(&client, short_lived);
    // Wait for expiry
    tokio::time::sleep(tokio::time::Duration::from_millis(1500)).await;
    // JWKS should reject immediately (exp validation)
    // But since fallback is enabled, it hits introspection → active: false → error
    let err1 = verifier
        .verify_token(token.access_token.clone())
        .await
        .unwrap_err();
    assert!(matches!(err1, AuthenticationError::InactiveToken));
    // Now revoke it (expired + revoked) → still InactiveToken (no special handling needed)
    server.revoke_token(&client, &token.access_token).await;
    let err2 = verifier.verify_token(token.access_token).await.unwrap_err();
    assert!(matches!(err2, AuthenticationError::InactiveToken));
}
// Verifies the verifier's result cache: two verifications of the same token
// must agree on identity fields (the second is expected to be served from cache).
#[tokio::test]
async fn test_jwks_and_introspection_cache_works() {
    let server = OAuthTestServer::start().await;
    let client = server
        .register_client(json!({ "scope": "openid", "redirect_uris": ["http://localhost"] }));
    let verifier = token_verifier(
        vec![
            VerificationStrategies::JWKs {
                jwks_uri: server.endpoints.jwks.clone(),
            },
            VerificationStrategies::Introspection {
                introspection_uri: server.endpoints.introspect.clone(),
                client_id: client.client_id.clone(),
                client_secret: client.client_secret.as_ref().unwrap().clone(),
                use_basic_auth: true,
                extra_params: None,
            },
        ],
        &server.endpoints,
        None,
    )
    .await;
    let token = server.generate_token(&client, server.jwt_options().user_id("dave").build());
    // First call → goes through full flow
    let info1 = verifier
        .verify_token(token.access_token.clone())
        .await
        .unwrap();
    // Second call → should hit cache (no network)
    let info2 = verifier
        .verify_token(token.access_token.clone())
        .await
        .unwrap();
    assert_eq!(info1.user_id, info2.user_id);
    assert_eq!(info1.token_unique_id, info2.token_unique_id);
}
// A verifier configured with a mismatched expected audience must reject an
// otherwise-valid token with `TokenVerificationFailed`.
#[tokio::test]
async fn test_audience_validation_rejects_wrong_aud() {
    let server = OAuthTestServer::start().await;
    let client = server
        .register_client(json!({ "scope": "openid", "redirect_uris": ["http://localhost"] }));
    let verifier = token_verifier(
        vec![VerificationStrategies::Introspection {
            introspection_uri: server.endpoints.introspect.clone(),
            client_id: client.client_id.clone(),
            client_secret: client.client_secret.as_ref().unwrap().clone(),
            use_basic_auth: true,
            extra_params: None,
        }],
        &server.endpoints,
        // Deliberately not the registered client's id.
        Some(Audience::Single("wrong-client-id-999".to_string())),
    )
    .await;
    let token = server.generate_token(&client, server.jwt_options().user_id("eve").build());
    let err = verifier.verify_token(token.access_token).await.unwrap_err();
    assert!(matches!(
        err,
        AuthenticationError::TokenVerificationFailed { .. }
    ));
}
// A verifier configured with the wrong expected issuer must reject a token
// even though its signature validates against the server's JWKS.
#[tokio::test]
async fn test_issuer_validation_rejects_wrong_iss() {
    let server = OAuthTestServer::start().await;
    let client = server
        .register_client(json!({ "scope": "openid", "redirect_uris": ["http://localhost"] }));
    // Build a verifier whose expected issuer does not match the test server.
    // (The previous version also built an unused `_verifier` via the helper;
    // that dead setup has been removed.)
    let wrong_verifier = GenericOauthTokenVerifier::new(TokenVerifierOptions {
        strategies: vec![VerificationStrategies::JWKs {
            jwks_uri: server.endpoints.jwks.clone(),
        }],
        validate_audience: None,
        validate_issuer: Some("https://wrong-issuer.example.com".to_string()),
        cache_capacity: None,
    })
    .unwrap();
    let token = server.generate_token(&client, server.jwt_options().user_id("frank").build());
    let err = wrong_verifier
        .verify_token(token.access_token)
        .await
        .unwrap_err();
    assert!(matches!(
        err,
        AuthenticationError::TokenVerificationFailed { .. }
    ));
}
// When JWKS validation succeeds and a UserInfo strategy is also configured,
// the /userinfo claims (email, name, picture) enrich `auth_info.extra`.
#[tokio::test]
async fn test_userinfo_enriches_jwt_claims() {
    let server = OAuthTestServer::start().await;
    let client = server.register_client(
        json!({ "scope": "openid profile email", "redirect_uris": ["http://localhost"] }),
    );
    let verifier = token_verifier(
        vec![
            VerificationStrategies::JWKs {
                jwks_uri: server.endpoints.jwks.clone(),
            },
            VerificationStrategies::UserInfo {
                userinfo_uri: server.endpoints.userinfo.clone(),
            },
        ],
        &server.endpoints,
        None,
    )
    .await;
    let token = server.generate_token(&client, server.jwt_options().user_id("grace").build());
    let auth_info = verifier.verify_token(token.access_token).await.unwrap();
    let extra = auth_info.extra.unwrap();
    assert_eq!(
        extra.get("email").unwrap().as_str().unwrap(),
        "test@example.com"
    );
    assert_eq!(extra.get("name").unwrap().as_str().unwrap(), "Test User");
    assert!(extra.get("picture").is_some());
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/auth_provider/work_os.rs | crates/rust-mcp-extra/src/auth_provider/work_os.rs | //! # WorkOS AuthKit OAuth2 Provider for MCP Servers
//!
//! This module implements an OAuth2 provider specifically designed to integrate
//! [WorkOS AuthKit](https://workos.com/docs/authkit) as the identity
//! provider (IdP) in an MCP (Model Context Protocol) server ecosystem.
//!
//! It enables your MCP server to:
//! - Expose standard OAuth2/.well-known endpoints
//! - Serve authorization server metadata (`/.well-known/oauth-authorization-server`)
//! - Serve protected resource metadata (custom per MCP)
//! - Verify incoming access tokens using JWKs + UserInfo endpoint validation
//!
//! ## Features
//!
//! - Zero-downtime token verification with cached JWKs
//! - Automatic construction of OAuth2 discovery documents
//! - Built-in CORS support for metadata endpoints
//! - Pluggable into `rust-mcp-sdk`'s authentication system via the `AuthProvider` trait
//!
//! ## Example
//!
//! ```rust,ignore
//!
//! let auth_provider = WorkOsAuthProvider::new(WorkOSAuthOptions {
//! // Your AuthKit app domain (found in WorkOS dashboard)
//! authkit_domain: "https://your-app.authkit.app".to_string(),
//! // Base URL of your MCP server (used to build protected resource metadata URL)
//! mcp_server_url: "http://localhost:3000/mcp".to_string(),
//! })?;
//!
//! // Register in your MCP server
//! let server = hyper_server::create_server(
//! server_details,
//! handler,
//! HyperServerOptions {
//! host: "localhost".to_string(),
//! port: 3000,
//! auth: Some(Arc::new(auth_provider)),
//! ..Default::default()
//! });
//! ```
use crate::token_verifier::{
GenericOauthTokenVerifier, TokenVerifierOptions, VerificationStrategies,
};
use async_trait::async_trait;
use bytes::Bytes;
use http::{header::CONTENT_TYPE, StatusCode};
use http_body_util::{BodyExt, Full};
use rust_mcp_sdk::{
auth::{
create_discovery_endpoints, AuthInfo, AuthMetadataBuilder, AuthProvider,
AuthenticationError, AuthorizationServerMetadata, OauthEndpoint,
OauthProtectedResourceMetadata, OauthTokenVerifier,
},
error::McpSdkError,
mcp_http::{middleware::CorsMiddleware, GenericBody, GenericBodyExt, Middleware},
mcp_server::{
error::{TransportServerError, TransportServerResult},
join_url, McpAppState,
},
};
use std::{collections::HashMap, sync::Arc, vec};
static SCOPES_SUPPORTED: &[&str] = &["email", "offline_access", "openid", "profile"];
/// Configuration options for the WorkOS AuthKit OAuth provider.
pub struct WorkOSAuthOptions<'a> {
    /// AuthKit app domain (from the WorkOS dashboard); used as the OAuth issuer.
    pub authkit_domain: String,
    /// Public base URL of this MCP server; used to build discovery metadata.
    pub mcp_server_url: String,
    /// Scopes that must be granted; when `None`, a default supported set is advertised.
    pub required_scopes: Option<Vec<&'a str>>,
    /// Optional custom token verifier; when `None`, a JWKS + UserInfo verifier is built.
    pub token_verifier: Option<Box<dyn OauthTokenVerifier>>,
    /// Human-readable resource name shown in discovery metadata (optional).
    pub resource_name: Option<String>,
    /// Documentation URL or identifier for this resource (optional).
    pub resource_documentation: Option<String>,
}
/// WorkOS AuthKit integration implementing `AuthProvider` for MCP servers.
///
/// This provider makes your MCP server compatible with clients that expect standard
/// OAuth2 authorization server and protected resource discovery endpoints when using
/// WorkOS AuthKit as the identity provider.
pub struct WorkOsAuthProvider {
    /// Pre-built OAuth2 authorization-server metadata served at discovery time.
    auth_server_meta: AuthorizationServerMetadata,
    /// Pre-built protected-resource metadata for this MCP server.
    protected_resource_meta: OauthProtectedResourceMetadata,
    /// Discovery endpoint lookup table built by `create_discovery_endpoints`
    /// (keys are presumably URL paths — confirm against that helper).
    endpoint_map: HashMap<String, OauthEndpoint>,
    /// Absolute URL of the protected-resource metadata document.
    protected_resource_metadata_url: String,
    /// Verifier used to validate incoming access tokens.
    token_verifier: Box<dyn OauthTokenVerifier>,
}
impl WorkOsAuthProvider {
/// Creates a new `WorkOsAuthProvider` instance.
///
/// This performs:
/// - Validation and parsing of URLs
/// - Construction of OAuth2 metadata documents
/// - Setup of token verification using JWKs and UserInfo endpoint
///
/// /// # Example
///
/// ```rust,ignore
/// use rust_mcp_extra::auth_provider::work_os::{WorkOSAuthOptions, WorkOsAuthProvider};
///
/// let auth_provider = WorkOsAuthProvider::new(WorkOSAuthOptions {
/// authkit_domain: "https://your-app.authkit.app".to_string(),
/// mcp_server_url: "http://localhost:3000/mcp".to_string(),
/// })?;
///
pub fn new(mut options: WorkOSAuthOptions) -> Result<Self, McpSdkError> {
let (endpoint_map, protected_resource_metadata_url) =
create_discovery_endpoints(&options.mcp_server_url)?;
let required_scopes = options.required_scopes.take();
let scopes_supported = required_scopes.clone().unwrap_or(SCOPES_SUPPORTED.to_vec());
let mut builder = AuthMetadataBuilder::new(&options.mcp_server_url)
.issuer(&options.authkit_domain)
.authorization_servers(vec![&options.authkit_domain])
.authorization_endpoint("/oauth2/authorize")
.introspection_endpoint("/oauth2/introspection")
.registration_endpoint("/oauth2/register")
.token_endpoint("/oauth2/token")
.jwks_uri("/oauth2/jwks")
.scopes_supported(scopes_supported);
if let Some(scopes) = required_scopes {
builder = builder.reqquired_scopes(scopes)
}
if let Some(resource_name) = options.resource_name.as_ref() {
builder = builder.resource_name(resource_name)
}
if let Some(resource_documentation) = options.resource_documentation.as_ref() {
builder = builder.service_documentation(resource_documentation)
}
let (auth_server_meta, protected_resource_meta) = builder.build()?;
let Some(jwks_uri) = auth_server_meta.jwks_uri.as_ref().map(|s| s.to_string()) else {
return Err(McpSdkError::Internal {
description: "jwks_uri is not defined!".to_string(),
});
};
let userinfo_uri = join_url(&auth_server_meta.issuer, "oauth2/userinfo")
.map_err(|err| McpSdkError::Internal {
description: format!("invalid userinfo url :{err}"),
})?
.to_string();
let token_verifier: Box<dyn OauthTokenVerifier> = match options.token_verifier {
Some(verifier) => verifier,
None => Box::new(GenericOauthTokenVerifier::new(TokenVerifierOptions {
strategies: vec![
VerificationStrategies::JWKs { jwks_uri },
VerificationStrategies::UserInfo { userinfo_uri },
],
validate_audience: None,
validate_issuer: Some(options.authkit_domain.clone()),
cache_capacity: None,
})?),
};
Ok(Self {
endpoint_map,
protected_resource_metadata_url,
token_verifier,
auth_server_meta,
protected_resource_meta,
})
}
/// Helper to build JSON response for authorization server metadata with CORS.
fn handle_authorization_server_metadata(
response_str: String,
) -> TransportServerResult<http::Response<GenericBody>> {
let body = Full::new(Bytes::from(response_str))
.map_err(|err| TransportServerError::HttpError(err.to_string()))
.boxed();
http::Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, "application/json")
.body(body)
.map_err(|err| TransportServerError::HttpError(err.to_string()))
}
/// Helper to build JSON response for protected resource metadata with permissive CORS.
fn handle_protected_resource_metadata(
response_str: String,
) -> TransportServerResult<http::Response<GenericBody>> {
use http_body_util::BodyExt;
let body = Full::new(Bytes::from(response_str))
.map_err(|err| TransportServerError::HttpError(err.to_string()))
.boxed();
http::Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, "application/json")
.body(body)
.map_err(|err| TransportServerError::HttpError(err.to_string()))
}
}
#[async_trait]
impl AuthProvider for WorkOsAuthProvider {
    /// Returns the map of supported OAuth discovery endpoints.
    fn auth_endpoints(&self) -> Option<&HashMap<String, OauthEndpoint>> {
        Some(&self.endpoint_map)
    }
    /// Handles incoming requests to OAuth metadata endpoints.
    ///
    /// Resolves the endpoint type from the request, checks the HTTP method,
    /// then serves the pre-built metadata document as JSON through the
    /// default CORS middleware. Unknown paths/endpoints get a 404.
    async fn handle_request(
        &self,
        request: http::Request<&str>,
        state: Arc<McpAppState>,
    ) -> Result<http::Response<GenericBody>, TransportServerError> {
        // Paths that do not map to a known discovery endpoint → empty 404.
        let Some(endpoint) = self.endpoint_type(&request) else {
            return http::Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(GenericBody::empty())
                .map_err(|err| TransportServerError::HttpError(err.to_string()));
        };
        // return early if method is not allowed
        if let Some(response) = self.validate_allowed_methods(endpoint, request.method()) {
            return Ok(response);
        }
        match endpoint {
            OauthEndpoint::AuthorizationServerMetadata => {
                // Serialize up front so the 'static closure owns the payload.
                let json_payload = serde_json::to_string(&self.auth_server_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_authorization_server_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            OauthEndpoint::ProtectedResourceMetadata => {
                let json_payload = serde_json::to_string(&self.protected_resource_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_protected_resource_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            // Any other endpoint kind is not implemented by this provider.
            _ => Ok(GenericBody::create_404_response()),
        }
    }
    /// Verifies an access token using JWKs and optional UserInfo validation.
    ///
    /// Returns authenticated `AuthInfo` on success.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError> {
        self.token_verifier.verify_token(access_token).await
    }
    /// Returns the full URL to the protected resource metadata document.
    fn protected_resource_metadata_url(&self) -> Option<&str> {
        Some(self.protected_resource_metadata_url.as_str())
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/auth_provider/scalekit.rs | crates/rust-mcp-extra/src/auth_provider/scalekit.rs | use crate::token_verifier::{
GenericOauthTokenVerifier, TokenVerifierOptions, VerificationStrategies,
};
use async_trait::async_trait;
use bytes::Bytes;
use http::{header::CONTENT_TYPE, StatusCode};
use http_body_util::{BodyExt, Full};
use rust_mcp_sdk::{
auth::{
create_discovery_endpoints, AuthInfo, AuthMetadataBuilder, AuthProvider,
AuthenticationError, AuthorizationServerMetadata, OauthEndpoint,
OauthProtectedResourceMetadata, OauthTokenVerifier,
},
error::McpSdkError,
mcp_http::{middleware::CorsMiddleware, GenericBody, GenericBodyExt, Middleware},
mcp_server::{
error::{TransportServerError, TransportServerResult},
join_url, McpAppState,
},
};
use std::{collections::HashMap, sync::Arc, vec};
use url::Url;
/// Configuration options for the [`ScalekitAuthProvider`].
///
/// These values come from the Scalekit dashboard and MCP server configuration.
pub struct ScalekitAuthOptions<'a> {
    /// Base Scalekit environment URL.
    /// This value can be found in the Scalekit dashboard, located in the Settings section.
    ///
    /// If protocol is missing (no `http://` or `https://`), `https://` is automatically added.
    pub environment_url: String,
    /// Resource identifier of this MCP server.
    /// This value can be found in the Scalekit dashboard, located in the MCP Servers section.
    pub resource_id: String,
    /// Public-facing MCP server base URL.
    pub mcp_server_url: String,
    /// Optional list of required OAuth scopes for this resource.
    pub required_scopes: Option<Vec<&'a str>>,
    /// Human-readable resource name for documentation/metadata.
    pub resource_name: Option<String>,
    /// Human-readable resource documentation URL or content identifier.
    pub resource_documentation: Option<String>,
    /// Optional custom token verifier.
    /// If omitted, a default JWK-based [`GenericOauthTokenVerifier`] is created.
    pub token_verifier: Option<Box<dyn OauthTokenVerifier>>,
}
/// MCP OAuth provider implementation for Scalekit.
pub struct ScalekitAuthProvider {
    /// Pre-built OAuth2 authorization-server metadata served at discovery time.
    auth_server_meta: AuthorizationServerMetadata,
    /// Pre-built protected-resource metadata for this MCP server.
    protected_resource_meta: OauthProtectedResourceMetadata,
    /// Discovery endpoint lookup table built by `create_discovery_endpoints`.
    endpoint_map: HashMap<String, OauthEndpoint>,
    /// Absolute URL of the protected-resource metadata document.
    protected_resource_metadata_url: String,
    /// Verifier used to validate incoming access tokens.
    token_verifier: Box<dyn OauthTokenVerifier>,
}
impl ScalekitAuthProvider {
    /// Creates a new [`ScalekitAuthProvider`] from configuration options.
    ///
    /// This method:
    /// - Normalizes the environment URL protocol
    /// - Builds OAuth discovery URLs
    /// - Pulls authorization server metadata
    /// - Builds protected resource metadata
    /// - Instantiates a JWK-based token verifier if no custom verifier is provided
    ///
    /// # Errors
    /// Returns [`McpSdkError`] if:
    /// - URLs are invalid
    /// - Metadata discovery fails
    /// - JWK verifier initialization fails
    pub async fn new<'a>(mut options: ScalekitAuthOptions<'a>) -> Result<Self, McpSdkError> {
        // Normalize environment URL, defaulting to https:// when no scheme is given.
        let environment_url = if options.environment_url.starts_with("http://")
            || options.environment_url.starts_with("https://")
        {
            options.environment_url.clone()
        } else {
            format!("https://{}", options.environment_url)
        };
        let issuer = Url::parse(&environment_url).map_err(|err| McpSdkError::Internal {
            description: format!("invalid environment url :{err}"),
        })?;
        // Build discovery document URL for this resource
        let discovery_url = join_url(
            &issuer,
            &format!(
                "/.well-known/oauth-authorization-server/resources/{}",
                options.resource_id
            ),
        )
        .map_err(|err| McpSdkError::Internal {
            description: format!("invalid discovery url :{err}"),
        })?;
        let (endpoint_map, protected_resource_metadata_url) =
            create_discovery_endpoints(&options.mcp_server_url)?;
        let required_scopes: Vec<String> = options
            .required_scopes
            .take()
            .unwrap_or_default()
            .iter()
            .map(|s| s.to_string())
            .collect();
        // Propagate discovery failures instead of panicking, as documented
        // under `# Errors` (this previously used `.unwrap()`).
        let mut builder = AuthMetadataBuilder::from_discovery_url(
            discovery_url.as_str(),
            options.mcp_server_url,
            required_scopes.clone(),
        )
        .await
        .map_err(|err| McpSdkError::Internal {
            description: format!("failed to fetch authorization server metadata: {err}"),
        })?;
        let authorization_servers =
            join_url(&issuer, &format!("/resources/{}", options.resource_id))
                .map_err(|err| McpSdkError::Internal {
                    description: format!("invalid authorization server url :{err}"),
                })?
                .to_string();
        builder = builder.authorization_servers(vec![&authorization_servers]);
        if !required_scopes.is_empty() {
            // NOTE: `reqquired_scopes` (sic) is the upstream builder's method name.
            builder = builder.reqquired_scopes(required_scopes)
        }
        // Set only once (the previous version set these twice, redundantly).
        if let Some(resource_name) = options.resource_name.as_ref() {
            builder = builder.resource_name(resource_name)
        }
        if let Some(resource_documentation) = options.resource_documentation.as_ref() {
            builder = builder.service_documentation(resource_documentation)
        }
        let (auth_server_meta, protected_resource_meta) = builder.build()?;
        let Some(jwks_uri) = auth_server_meta.jwks_uri.as_ref().map(|s| s.to_string()) else {
            return Err(McpSdkError::Internal {
                description: "jwks_uri is not defined!".to_string(),
            });
        };
        // Use a caller-supplied verifier if given; otherwise verify via JWKS.
        let token_verifier: Box<dyn OauthTokenVerifier> = match options.token_verifier {
            Some(verifier) => verifier,
            None => Box::new(GenericOauthTokenVerifier::new(TokenVerifierOptions {
                strategies: vec![VerificationStrategies::JWKs { jwks_uri }],
                validate_audience: None,
                // Url::to_string() appends a trailing slash; strip it so the
                // issuer comparison matches the token's `iss` claim.
                validate_issuer: Some(issuer.to_string().trim_end_matches("/").to_string()),
                cache_capacity: None,
            })?),
        };
        Ok(Self {
            endpoint_map,
            protected_resource_metadata_url,
            token_verifier,
            auth_server_meta,
            protected_resource_meta,
        })
    }

    /// Builds a `200 OK` `application/json` response carrying the serialized
    /// authorization-server metadata. CORS headers are applied by the
    /// middleware at the call site, not here.
    fn handle_authorization_server_metadata(
        response_str: String,
    ) -> TransportServerResult<http::Response<GenericBody>> {
        let body = Full::new(Bytes::from(response_str))
            .map_err(|err| TransportServerError::HttpError(err.to_string()))
            .boxed();
        http::Response::builder()
            .status(StatusCode::OK)
            .header(CONTENT_TYPE, "application/json")
            .body(body)
            .map_err(|err| TransportServerError::HttpError(err.to_string()))
    }

    /// JSON response builder for the protected-resource metadata document.
    /// The response shape is identical, so delegate to the shared helper.
    fn handle_protected_resource_metadata(
        response_str: String,
    ) -> TransportServerResult<http::Response<GenericBody>> {
        Self::handle_authorization_server_metadata(response_str)
    }
}
#[async_trait]
impl AuthProvider for ScalekitAuthProvider {
    /// Returns the map of supported OAuth discovery endpoints.
    fn auth_endpoints(&self) -> Option<&HashMap<String, OauthEndpoint>> {
        Some(&self.endpoint_map)
    }
    /// Handles incoming requests to OAuth metadata endpoints.
    ///
    /// Resolves the endpoint type from the request, checks the HTTP method,
    /// then serves the pre-built metadata document as JSON through the
    /// default CORS middleware. Unknown paths/endpoints get a 404.
    async fn handle_request(
        &self,
        request: http::Request<&str>,
        state: Arc<McpAppState>,
    ) -> Result<http::Response<GenericBody>, TransportServerError> {
        // Paths that do not map to a known discovery endpoint → empty 404.
        let Some(endpoint) = self.endpoint_type(&request) else {
            return http::Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(GenericBody::empty())
                .map_err(|err| TransportServerError::HttpError(err.to_string()));
        };
        // return early if method is not allowed
        if let Some(response) = self.validate_allowed_methods(endpoint, request.method()) {
            return Ok(response);
        }
        match endpoint {
            OauthEndpoint::AuthorizationServerMetadata => {
                // Serialize up front so the 'static closure owns the payload.
                let json_payload = serde_json::to_string(&self.auth_server_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_authorization_server_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            OauthEndpoint::ProtectedResourceMetadata => {
                let json_payload = serde_json::to_string(&self.protected_resource_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_protected_resource_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            // Any other endpoint kind is not implemented by this provider.
            _ => Ok(GenericBody::create_404_response()),
        }
    }
    /// Verifies an access token using JWKs and optional UserInfo validation.
    ///
    /// Returns authenticated `AuthInfo` on success.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError> {
        self.token_verifier.verify_token(access_token).await
    }
    /// Returns the full URL to the protected resource metadata document.
    fn protected_resource_metadata_url(&self) -> Option<&str> {
        Some(self.protected_resource_metadata_url.as_str())
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/src/auth_provider/keycloak.rs | crates/rust-mcp-extra/src/auth_provider/keycloak.rs | use crate::token_verifier::{
GenericOauthTokenVerifier, TokenVerifierOptions, VerificationStrategies,
};
use async_trait::async_trait;
use bytes::Bytes;
use http::{header::CONTENT_TYPE, StatusCode};
use http_body_util::{BodyExt, Full};
use rust_mcp_sdk::{
auth::{
create_discovery_endpoints, AuthInfo, AuthMetadataBuilder, AuthProvider,
AuthenticationError, AuthorizationServerMetadata, OauthEndpoint,
OauthProtectedResourceMetadata, OauthTokenVerifier,
},
error::McpSdkError,
mcp_http::{middleware::CorsMiddleware, GenericBody, GenericBodyExt, Middleware},
mcp_server::{
error::{TransportServerError, TransportServerResult},
join_url, McpAppState,
},
};
use std::{collections::HashMap, sync::Arc};
/// Default scope set advertised when the caller supplies no `required_scopes`
/// (mirrors Keycloak's built-in client scopes — confirm against the realm).
static SCOPES_SUPPORTED: &[&str] = &[
    "openid",
    "acr",
    "basic",
    "web-origins",
    "email",
    "mcp:tools",
    "address",
    "profile",
    "phone",
    "roles",
    "microprofile-jwt",
    "service_account",
    "offline_access",
    "organization",
];
/// Configuration options for the Keycloak OAuth provider.
pub struct KeycloakAuthOptions<'a> {
    /// Base URL of the Keycloak server (e.g. `https://keycloak.example.com`)
    pub keycloak_base_url: String,
    /// Public base URL of this MCP server (used for discovery endpoints)
    pub mcp_server_url: String,
    /// Scopes that must be present in the access token
    pub required_scopes: Option<Vec<&'a str>>,
    /// Client ID for confidential client (required for token introspection)
    pub client_id: Option<String>,
    /// Client secret for confidential client (required for token introspection)
    pub client_secret: Option<String>,
    /// Optional custom token verifier; when `None`, a default verifier is
    /// assembled from JWKS plus introspection or UserInfo strategies.
    pub token_verifier: Option<Box<dyn OauthTokenVerifier>>,
    /// Human-readable name of the protected resource (optional, shown in discovery)
    pub resource_name: Option<String>,
    /// Documentation URL for this resource (optional)
    pub resource_documentation: Option<String>,
}
/// Keycloak integration implementing `AuthProvider` for MCP servers.
///
/// This provider makes your MCP server compatible with clients that expect standard
/// OAuth2/OpenID Connect discovery endpoints (authorization server metadata and
/// protected resource metadata) when using Keycloak as the identity provider.
///
/// It supports multiple token verification strategies with the following precedence:
///
/// 1. JWKs-based verification (always enabled) – validates JWT signature, issuer, expiry, etc.
/// 2. Token Introspection (if client_id + client_secret provided) – active validation against Keycloak
/// 3. UserInfo endpoint validation (fallback when `openid` scope is required but no introspection credentials)
///
pub struct KeycloakAuthProvider {
    /// Pre-built OAuth2 authorization-server metadata served at discovery time.
    auth_server_meta: AuthorizationServerMetadata,
    /// Pre-built protected-resource metadata for this MCP server.
    protected_resource_meta: OauthProtectedResourceMetadata,
    /// Discovery endpoint lookup table built by `create_discovery_endpoints`.
    endpoint_map: HashMap<String, OauthEndpoint>,
    /// Absolute URL of the protected-resource metadata document.
    protected_resource_metadata_url: String,
    /// Verifier used to validate incoming access tokens.
    token_verifier: Box<dyn OauthTokenVerifier>,
}
impl KeycloakAuthProvider {
/// Creates a new KeycloakAuthProvider instance.
///
/// Creates a new Keycloak-backed auth provider from the given options.
///
/// This method configures OAuth2/OpenID Connect discovery metadata and selects
/// the best available token verification strategy:
///
/// ### Verification Strategy Priority & Security Considerations
///
/// | Strategy         | When Used                                         | Security Level | Notes |
/// |------------------|---------------------------------------------------|----------------|-------|
/// | JWKs (local)     | Always                                            | High           | Validates signature, `iss`, `exp`, `nbf`, etc. No network call. |
/// | Introspection    | When `client_id` + `client_secret` are provided   | Highest        | Active validation with Keycloak. Detects revoked/expired tokens immediately. Recommended for production. |
/// | UserInfo         | Fallback when `openid` scope is required but no introspection credentials | Medium | Validates token by calling `/userinfo`. Less secure than introspection (some IdPs accept invalid tokens). |
///
/// Warning: If neither introspection nor `openid` scope is configured, only local JWT validation occurs.
/// This means revoked tokens may still be accepted until they expire.
///
/// Recommendation: Always provide `client_id` and `client_secret` in production for full revocation support.
///
/// # Errors
/// Returns `McpSdkError` when discovery-endpoint construction, metadata building,
/// or token-verifier construction fails, or when the built metadata lacks a `jwks_uri`.
pub fn new(mut options: KeycloakAuthOptions) -> Result<Self, McpSdkError> {
    // Derive the well-known discovery endpoint map from the MCP server URL.
    let (endpoint_map, protected_resource_metadata_url) =
        create_discovery_endpoints(&options.mcp_server_url)?;
    // `take()` moves the scopes out of `options`; the clone below is needed because
    // the scopes are consumed again by `reqquired_scopes` further down.
    let required_scopes = options.required_scopes.take();
    let scopes_supported = required_scopes.clone().unwrap_or(SCOPES_SUPPORTED.to_vec());
    // All endpoint paths below are Keycloak's fixed OIDC endpoint layout,
    // resolved relative to the realm base URL.
    let mut builder = AuthMetadataBuilder::new(&options.mcp_server_url)
        .issuer(&options.keycloak_base_url)
        .authorization_servers(vec![&options.keycloak_base_url])
        .authorization_endpoint("/protocol/openid-connect/auth")
        .introspection_endpoint("/protocol/openid-connect/token/introspect")
        .registration_endpoint("/clients-registrations/openid-connect")
        .token_endpoint("/protocol/openid-connect/token")
        .revocation_endpoint("/protocol/openid-connect/revoke")
        .jwks_uri("/protocol/openid-connect/certs")
        .scopes_supported(scopes_supported);
    // Remember whether "openid" was requested: it decides the UserInfo fallback below.
    let has_openid_scope =
        matches!(required_scopes.as_ref(), Some(scopes) if scopes.contains(&"openid"));
    if let Some(scopes) = required_scopes {
        // NOTE(review): the builder method is spelled `reqquired_scopes` — looks like
        // a typo in `AuthMetadataBuilder`; confirm upstream before renaming here.
        builder = builder.reqquired_scopes(scopes)
    }
    if let Some(resource_name) = options.resource_name.as_ref() {
        builder = builder.resource_name(resource_name)
    }
    if let Some(resource_documentation) = options.resource_documentation.as_ref() {
        builder = builder.service_documentation(resource_documentation)
    }
    let (auth_server_meta, protected_resource_meta) = builder.build()?;
    // jwks_uri was set above, so this should always succeed; guard defensively anyway.
    let Some(jwks_uri) = auth_server_meta.jwks_uri.as_ref().map(|s| s.to_string()) else {
        return Err(McpSdkError::Internal {
            description: "jwks_uri is not defined!".to_string(),
        });
    };
    // Strategy 1 (always): local JWKs signature validation.
    let mut strategies = Vec::with_capacity(2);
    strategies.push(VerificationStrategies::JWKs { jwks_uri });
    // Strategy 2 (preferred): active introspection when credentials are available.
    if let (Some(client_id), Some(client_secret), Some(introspection_uri)) = (
        options.client_id.take(),
        options.client_secret.take(),
        auth_server_meta.introspection_endpoint.as_ref(),
    ) {
        strategies.push(VerificationStrategies::Introspection {
            introspection_uri: introspection_uri.to_string(),
            client_id,
            client_secret,
            use_basic_auth: true,
            extra_params: Some(vec![("token_type_hint", "access_token")]),
        });
    } else if has_openid_scope {
        // Strategy 2 (fallback): /userinfo validation, only meaningful with "openid".
        let userinfo_uri = join_url(
            &auth_server_meta.issuer,
            "/protocol/openid-connect/userinfo",
        )
        .map_err(|err| McpSdkError::Internal {
            description: format!("invalid userinfo url :{err}"),
        })?
        .to_string();
        strategies.push(VerificationStrategies::UserInfo { userinfo_uri })
    } else {
        // Only local JWT validation remains — warn, since revocation won't be detected.
        tracing::warn!("Keycloak token verification is missing both Introspection and UserInfo strategies. Please provide client_id and client_secret, or ensure openid is included as a required scope.")
    };
    // A caller-supplied verifier (e.g. for testing) overrides the generic one.
    let token_verifier: Box<dyn OauthTokenVerifier> = match options.token_verifier {
        Some(verifier) => verifier,
        None => Box::new(GenericOauthTokenVerifier::new(TokenVerifierOptions {
            strategies,
            validate_audience: None,
            validate_issuer: Some(options.keycloak_base_url.clone()),
            cache_capacity: None,
        })?),
    };
    Ok(Self {
        endpoint_map,
        protected_resource_metadata_url,
        token_verifier,
        auth_server_meta,
        protected_resource_meta,
    })
}
/// Builds a `200 OK` JSON response carrying the authorization server metadata body.
fn handle_authorization_server_metadata(
    response_str: String,
) -> TransportServerResult<http::Response<GenericBody>> {
    let payload = Full::new(Bytes::from(response_str))
        .map_err(|err| TransportServerError::HttpError(err.to_string()))
        .boxed();
    let response = http::Response::builder()
        .status(StatusCode::OK)
        .header(CONTENT_TYPE, "application/json")
        .body(payload);
    response.map_err(|err| TransportServerError::HttpError(err.to_string()))
}
/// Builds a `200 OK` JSON response carrying the protected resource metadata body.
fn handle_protected_resource_metadata(
    response_str: String,
) -> TransportServerResult<http::Response<GenericBody>> {
    // The local `use http_body_util::BodyExt;` was removed: the sibling helper
    // above calls `.boxed()` without it, so the trait is already in scope file-wide.
    let body = Full::new(Bytes::from(response_str))
        .map_err(|err| TransportServerError::HttpError(err.to_string()))
        .boxed();
    http::Response::builder()
        .status(StatusCode::OK)
        .header(CONTENT_TYPE, "application/json")
        .body(body)
        .map_err(|err| TransportServerError::HttpError(err.to_string()))
}
}
#[async_trait]
impl AuthProvider for KeycloakAuthProvider {
    /// Returns the map of supported OAuth discovery endpoints.
    fn auth_endpoints(&self) -> Option<&HashMap<String, OauthEndpoint>> {
        Some(&self.endpoint_map)
    }
    /// Handles incoming requests to OAuth metadata endpoints.
    ///
    /// Unknown paths yield `404`; disallowed HTTP methods are rejected early.
    /// Both metadata endpoints are served through the default CORS middleware so
    /// browser-based clients can perform discovery.
    async fn handle_request(
        &self,
        request: http::Request<&str>,
        state: Arc<McpAppState>,
    ) -> Result<http::Response<GenericBody>, TransportServerError> {
        // Resolve which OAuth endpoint (if any) this request targets.
        let Some(endpoint) = self.endpoint_type(&request) else {
            return http::Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(GenericBody::empty())
                .map_err(|err| TransportServerError::HttpError(err.to_string()));
        };
        // return early if method is not allowed
        if let Some(response) = self.validate_allowed_methods(endpoint, request.method()) {
            return Ok(response);
        }
        // NOTE(review): the two metadata arms below are structurally identical apart
        // from the serialized struct and the helper they call; candidates for merging.
        match endpoint {
            OauthEndpoint::AuthorizationServerMetadata => {
                // Serialize eagerly so the closure only moves a String, not &self.
                let json_payload = serde_json::to_string(&self.auth_server_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_authorization_server_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            OauthEndpoint::ProtectedResourceMetadata => {
                let json_payload = serde_json::to_string(&self.protected_resource_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_protected_resource_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            // Any other endpoint kind is not served by this provider.
            _ => Ok(GenericBody::create_404_response()),
        }
    }
    /// Verifies an access token using the strategies configured in `new`
    /// (JWKs always; introspection or UserInfo when available).
    ///
    /// Returns authenticated `AuthInfo` on success.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError> {
        self.token_verifier.verify_token(access_token).await
    }
    /// Returns the full URL to the protected resource metadata document.
    fn protected_resource_metadata_url(&self) -> Option<&str> {
        Some(self.protected_resource_metadata_url.as_str())
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/keycloak-auth.rs | crates/rust-mcp-extra/examples/keycloak-auth.rs | mod common;
use crate::common::{
handler::McpServerHandler,
utils::{create_server_info, enable_tracing},
};
use rust_mcp_extra::auth_provider::keycloak::{KeycloakAuthOptions, KeycloakAuthProvider};
use rust_mcp_sdk::{
error::SdkResult,
mcp_server::{hyper_server, HyperServerOptions},
ToMcpServerHandler,
};
use std::{env, sync::Arc};
/// Example MCP server secured with Keycloak OAuth2/OIDC authentication.
#[tokio::main]
async fn main() -> SdkResult<()> {
    enable_tracing();
    let server_details = create_server_info("Keycloak Oauth Test MCP Server");
    let handler = McpServerHandler {};
    let auth_provider = KeycloakAuthProvider::new(KeycloakAuthOptions {
        // `unwrap_or_else` avoids allocating the fallback String when AUTH_SERVER is set.
        keycloak_base_url: env::var("AUTH_SERVER")
            .unwrap_or_else(|_| "http://localhost:8080/realms/master".to_string()),
        mcp_server_url: "http://localhost:3000".to_string(),
        resource_name: Some("Keycloak Oauth Test MCP Server".to_string()),
        required_scopes: Some(vec!["mcp:tools"]),
        // When both are set, the provider upgrades to active token introspection.
        client_id: env::var("CLIENT_ID").ok(),
        client_secret: env::var("CLIENT_SECRET").ok(),
        token_verifier: None,
        resource_documentation: None,
    })?;
    let server = hyper_server::create_server(
        server_details,
        handler.to_mcp_server_handler(),
        HyperServerOptions {
            host: "localhost".to_string(),
            port: 3000,
            custom_streamable_http_endpoint: Some("/".to_string()),
            auth: Some(Arc::new(auth_provider)), // enable authentication
            sse_support: false,
            ..Default::default()
        },
    );
    server.start().await?;
    Ok(())
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/workos-auth.rs | crates/rust-mcp-extra/examples/workos-auth.rs | mod common;
use crate::common::{
handler::McpServerHandler,
utils::{create_server_info, enable_tracing},
};
use rust_mcp_extra::auth_provider::work_os::{WorkOSAuthOptions, WorkOsAuthProvider};
use rust_mcp_sdk::{
error::SdkResult,
mcp_server::{hyper_server, HyperServerOptions},
ToMcpServerHandler,
};
use std::{env, sync::Arc};
/// Example MCP server secured with WorkOS AuthKit authentication.
#[tokio::main]
async fn main() -> SdkResult<()> {
    enable_tracing();
    let server_details = create_server_info("Workos Oauth Test MCP Server");
    let handler = McpServerHandler {};
    let auth_provider = WorkOsAuthProvider::new(WorkOSAuthOptions {
        // `unwrap_or_else` avoids allocating the fallback String when AUTH_SERVER is set.
        authkit_domain: env::var("AUTH_SERVER")
            .unwrap_or_else(|_| "https://stalwart-opera-85-staging.authkit.app".to_string()),
        mcp_server_url: "http://127.0.0.1:3000/mcp".to_string(),
        required_scopes: Some(vec!["openid", "profile"]),
        resource_name: Some("Workos Oauth Test MCP Server".to_string()),
        resource_documentation: None,
        token_verifier: None,
    })?;
    let server = hyper_server::create_server(
        server_details,
        handler.to_mcp_server_handler(),
        HyperServerOptions {
            host: "127.0.0.1".to_string(),
            port: 3000,
            auth: Some(Arc::new(auth_provider)), // enable authentication
            sse_support: false,
            ..Default::default()
        },
    );
    server.start().await?;
    Ok(())
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/scalekit-auth.rs | crates/rust-mcp-extra/examples/scalekit-auth.rs | mod common;
use crate::common::{
handler::McpServerHandler,
utils::{create_server_info, enable_tracing},
};
use rust_mcp_extra::auth_provider::scalekit::{ScalekitAuthOptions, ScalekitAuthProvider};
use rust_mcp_sdk::{
error::SdkResult,
event_store::InMemoryEventStore,
mcp_server::{hyper_server, HyperServerOptions},
ToMcpServerHandler,
};
use std::{env, sync::Arc};
/// Example MCP server secured with Scalekit OAuth authentication.
#[tokio::main]
async fn main() -> SdkResult<()> {
    enable_tracing();
    let server_details = create_server_info("Scalekit Oauth Test MCP Server");
    let handler = McpServerHandler {};
    let auth_provider = ScalekitAuthProvider::new(ScalekitAuthOptions {
        // NOTE(review): the advertised resource URL uses port 3000 while the server
        // below binds port 8080 — confirm the mismatch is intentional (e.g. a proxy).
        mcp_server_url: "http://127.0.0.1:3000/mcp".to_string(),
        required_scopes: Some(vec!["profile"]),
        token_verifier: None,
        resource_name: Some("Scalekit Oauth Test MCP Server".to_string()),
        resource_documentation: None,
        // Typos fixed in both user-facing expect messages ("evnrionment" -> "environment").
        environment_url: env::var("ENVIRONMENT_URL")
            .expect("Please set 'ENVIRONMENT_URL' environment variable and try again."),
        resource_id: env::var("RESOURCE_ID")
            .expect("Please set 'RESOURCE_ID' environment variable and try again."),
    })
    .await?;
    let server = hyper_server::create_server(
        server_details,
        handler.to_mcp_server_handler(),
        HyperServerOptions {
            host: "127.0.0.1".to_string(),
            port: 8080,
            // `Arc` is already imported; use it directly for consistency with `auth` below.
            event_store: Some(Arc::new(InMemoryEventStore::default())), // enable resumability
            auth: Some(Arc::new(auth_provider)), // enable authentication
            sse_support: false,
            ..Default::default()
        },
    );
    server.start().await?;
    Ok(())
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/common/utils.rs | crates/rust-mcp-extra/examples/common/utils.rs | use rust_mcp_sdk::{
mcp_icon,
schema::{
Implementation, InitializeResult, ServerCapabilities, ServerCapabilitiesTools,
LATEST_PROTOCOL_VERSION,
},
};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Builds the `InitializeResult` advertised by the example servers,
/// using `server_name` for the name, title, and description fields.
pub fn create_server_info(server_name: &str) -> InitializeResult {
    // Single dark-theme PNG icon hosted in the rust-mcp-sdk repository.
    let icon = mcp_icon!(
        src = "https://raw.githubusercontent.com/rust-mcp-stack/rust-mcp-sdk/main/assets/rust-mcp-icon.png",
        mime_type = "image/png",
        sizes = ["128x128"],
        theme = "dark"
    );
    let server_info = Implementation {
        name: server_name.to_string(),
        version: "0.1.0".to_string(),
        title: Some(server_name.to_string()),
        description: Some(server_name.to_string()),
        icons: vec![icon],
        website_url: Some("https://github.com/rust-mcp-stack/rust-mcp-sdk".to_string()),
    };
    // Only the `tools` capability is advertised by these examples.
    let capabilities = ServerCapabilities {
        tools: Some(ServerCapabilitiesTools { list_changed: None }),
        ..Default::default()
    };
    InitializeResult {
        server_info,
        capabilities,
        meta: None,
        instructions: None,
        protocol_version: LATEST_PROTOCOL_VERSION.to_string(),
    }
}
/// Installs a global tracing subscriber whose filter comes from the environment
/// (`RUST_LOG`-style); falls back to the "info" level when unset or invalid.
pub fn enable_tracing() {
    let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| "info".into());
    tracing_subscriber::registry()
        .with(env_filter)
        .with(tracing_subscriber::fmt::layer())
        .init();
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/common/mod.rs | crates/rust-mcp-extra/examples/common/mod.rs | pub mod handler;
pub mod tool;
pub mod utils;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/common/tool.rs | crates/rust-mcp-extra/examples/common/tool.rs | use rust_mcp_sdk::{
auth::AuthInfo,
macros::{mcp_tool, JsonSchema},
schema::{schema_utils::CallToolError, CallToolResult, TextContent},
};
//*******************************//
//    Show Authentication Info   //
//*******************************//
/// Tool that renders the caller's current authentication info as JSON.
/// Takes no arguments; see `call_tool` in the `impl` below.
#[mcp_tool(
    name = "show_auth_info",
    description = "Shows current user authentication info in json format"
)]
#[derive(Debug, ::serde::Deserialize, ::serde::Serialize, JsonSchema, Default)]
pub struct ShowAuthInfo {}
impl ShowAuthInfo {
    /// Serializes the (optional) authentication info to pretty-printed JSON and
    /// returns it as the tool's text content.
    ///
    /// # Errors
    /// Returns `CallToolError` if JSON serialization fails.
    pub fn call_tool(&self, auth_info: Option<AuthInfo>) -> Result<CallToolResult, CallToolError> {
        let auth_info_json = serde_json::to_string_pretty(&auth_info).map_err(|err| {
            // Fixed typo ("Undable") and spacing in the user-facing error message.
            CallToolError::from_message(format!("Unable to display auth info as string: {err}"))
        })?;
        Ok(CallToolResult::text_content(vec![TextContent::from(
            auth_info_json,
        )]))
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-extra/examples/common/handler.rs | crates/rust-mcp-extra/examples/common/handler.rs | use crate::common::tool::ShowAuthInfo;
use async_trait::async_trait;
use rust_mcp_sdk::{
mcp_server::ServerHandler,
schema::{
schema_utils::CallToolError, CallToolRequestParams, CallToolResult, ListToolsResult,
PaginatedRequestParams, RpcError,
},
McpServer,
};
use std::sync::Arc;
/// Stateless MCP server handler shared by the auth-provider examples.
pub struct McpServerHandler;
#[async_trait]
impl ServerHandler for McpServerHandler {
    /// Handles `ListToolsRequest`, returning the single `show_auth_info` tool.
    async fn handle_list_tools_request(
        &self,
        _request: Option<PaginatedRequestParams>,
        _runtime: Arc<dyn McpServer>,
    ) -> std::result::Result<ListToolsResult, RpcError> {
        Ok(ListToolsResult {
            meta: None,
            next_cursor: None,
            tools: vec![ShowAuthInfo::tool()],
        })
    }
    /// Handles incoming `CallToolRequest` and processes it using the appropriate tool.
    async fn handle_call_tool_request(
        &self,
        params: CallToolRequestParams,
        runtime: Arc<dyn McpServer>,
    ) -> std::result::Result<CallToolResult, CallToolError> {
        // `==` reads better than `.eq(&…)` here.
        if params.name == ShowAuthInfo::tool_name() {
            let tool = ShowAuthInfo::default();
            tool.call_tool(runtime.auth_info_cloned().await)
        } else {
            // Fixed grammar in the user-facing error message.
            Err(CallToolError::from_message(format!(
                "Tool \"{}\" does not exist or is inactive!",
                params.name,
            )))
        }
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/session_store.rs | crates/rust-mcp-sdk/src/session_store.rs | mod in_memory_session_store;
use crate::mcp_server::ServerRuntime;
use async_trait::async_trait;
pub use in_memory_session_store::*;
use rust_mcp_transport::SessionId;
use std::sync::Arc;
/// Trait defining the interface for session storage operations
///
/// This trait provides asynchronous methods for managing session data,
/// Implementors must be Send and Sync to support concurrent access.
#[async_trait]
pub trait SessionStore: Send + Sync {
    /// Retrieves a session by its identifier
    ///
    /// # Arguments
    /// * `key` - The session identifier to look up
    ///
    /// # Returns
    /// * `Option<Arc<ServerRuntime>>` - The session stream if found, None otherwise
    async fn get(&self, key: &SessionId) -> Option<Arc<ServerRuntime>>;
    /// Stores a new session with the given identifier
    ///
    /// # Arguments
    /// * `key` - The session identifier
    /// * `value` - The duplex stream to store
    async fn set(&self, key: SessionId, value: Arc<ServerRuntime>);
    /// Deletes a session by its identifier
    ///
    /// # Arguments
    /// * `key` - The session identifier to delete
    async fn delete(&self, key: &SessionId);
    /// Returns `true` if a session with the given identifier exists
    async fn has(&self, session: &SessionId) -> bool;
    /// Returns the identifiers of all stored sessions
    async fn keys(&self) -> Vec<SessionId>;
    /// Returns all stored session runtimes
    async fn values(&self) -> Vec<Arc<ServerRuntime>>;
    /// Clears all sessions from the store
    async fn clear(&self);
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers.rs | crates/rust-mcp-sdk/src/hyper_servers.rs | pub mod error;
pub mod hyper_runtime;
pub mod hyper_server;
pub mod hyper_server_core;
mod routes;
mod server;
pub use server::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/lib.rs | crates/rust-mcp-sdk/src/lib.rs | pub mod error;
#[cfg(feature = "hyper-server")]
mod hyper_servers;
mod mcp_handlers;
#[cfg(any(feature = "hyper-server", feature = "auth"))]
pub mod mcp_http;
mod mcp_macros;
mod mcp_runtimes;
mod mcp_traits;
#[cfg(any(feature = "server", feature = "hyper-server"))]
pub mod session_store;
pub mod task_store;
mod utils;
#[cfg(feature = "client")]
pub mod mcp_client {
    //! Includes the runtimes and traits required to create a type-safe MCP client.
    //!
    //!
    //! **Choosing Between `client_runtime` and `client_runtime_core`?**
    //!
    //! [rust-mcp-sdk](https://github.com/rust-mcp-stack/rust-mcp-sdk) provides two types of runtime that you can choose from:
    //! - **client_runtime**: This is the recommended runtime for most MCP projects. It
    //!   works with the client handler trait (re-exported below as [`ClientHandler`]),
    //!   which offers default implementations for common messages like handling
    //!   initialization or responding to ping requests, so you only need to override
    //!   and customize the handler functions relevant to your specific needs.
    //!
    //!   Refer to [examples/simple-mcp-client-stdio](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/simple-mcp-client-stdio) for an example.
    //!
    //!
    //! - **client_runtime_core**: If you need more control over MCP messages, consider using
    //!   `client_runtime_core`, which works with the core handler trait (re-exported below
    //!   as [`ClientHandlerCore`]) that offers methods to manage the three MCP message
    //!   types: request, notification, and error.
    //!   While still providing type-safe objects in these methods, it allows you to determine how to
    //!   handle each message based on its type and parameters.
    //!
    //!   Refer to [examples/simple-mcp-client-stdio-core](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/simple-mcp-client-stdio-core) for an example.
    pub use super::mcp_handlers::mcp_client_handler::ClientHandler;
    pub use super::mcp_handlers::mcp_client_handler_core::ClientHandlerCore;
    pub use super::mcp_runtimes::client_runtime::mcp_client_runtime as client_runtime;
    pub use super::mcp_runtimes::client_runtime::mcp_client_runtime_core as client_runtime_core;
    pub use super::mcp_runtimes::client_runtime::{ClientRuntime, McpClientOptions};
    pub use super::mcp_traits::{McpClientHandler, ToMcpClientHandler, ToMcpClientHandlerCore};
    pub use super::utils::ensure_server_protocole_compatibility;
}
#[cfg(feature = "server")]
pub mod mcp_server {
    //! Includes the runtimes and traits required to create a type-safe MCP server.
    //!
    //!
    //! **Choosing Between `server_runtime` and `server_runtime_core`?**
    //!
    //! [rust-mcp-sdk](https://github.com/rust-mcp-stack/rust-mcp-sdk) provides two types of runtime that you can choose from:
    //! - **server_runtime**: This is the recommended runtime for most MCP projects. It
    //!   works with the `mcp_server_handler` trait, which offers default implementations
    //!   for common messages like handling initialization or responding to ping requests,
    //!   so you only need to override and customize the handler functions relevant to
    //!   your specific needs.
    //!
    //!   Refer to [examples/hello-world-mcp-server-stdio](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/hello-world-mcp-server-stdio) for an example.
    //!
    //!
    //! - **server_runtime_core**: If you need more control over MCP messages, consider using
    //!   `server_runtime_core`, which works with the `mcp_server_handler_core` trait that offers
    //!   methods to manage the three MCP message types: request, notification, and error.
    //!   While still providing type-safe objects in these methods, it allows you to determine how to
    //!   handle each message based on its type and parameters.
    //!
    //!   Refer to [examples/hello-world-mcp-server-stdio-core](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/hello-world-mcp-server-stdio-core) for an example.
    pub use super::mcp_handlers::mcp_server_handler::ServerHandler;
    pub use super::mcp_handlers::mcp_server_handler_core::ServerHandlerCore;
    pub use super::mcp_runtimes::server_runtime::mcp_server_runtime as server_runtime;
    pub use super::mcp_runtimes::server_runtime::mcp_server_runtime_core as server_runtime_core;
    pub use super::mcp_runtimes::server_runtime::{McpServerOptions, ServerRuntime};
    #[cfg(feature = "hyper-server")]
    pub use super::hyper_servers::*;
    pub use super::utils::enforce_compatible_protocol_version;
    #[cfg(feature = "auth")]
    pub use super::utils::join_url;
    #[cfg(feature = "hyper-server")]
    pub use super::mcp_http::{McpAppState, McpHttpHandler};
    pub use super::mcp_traits::{McpServerHandler, ToMcpServerHandler, ToMcpServerHandlerCore};
}
pub mod auth;
pub use mcp_traits::*;
pub use rust_mcp_transport::error::*;
pub use rust_mcp_transport::*;
#[cfg(feature = "macros")]
pub mod macros {
pub use rust_mcp_macros::*;
}
pub mod id_generator;
pub mod schema {
pub use rust_mcp_schema::schema_utils::*;
pub use rust_mcp_schema::*;
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_traits.rs | crates/rust-mcp-sdk/src/mcp_traits.rs | pub(super) mod id_generator;
#[cfg(feature = "client")]
mod mcp_client;
mod mcp_handler;
#[cfg(feature = "server")]
mod mcp_server;
mod request_id_gen;
pub use id_generator::*;
#[cfg(feature = "client")]
pub use mcp_client::*;
pub use mcp_handler::*;
#[cfg(feature = "server")]
pub use mcp_server::*;
pub use request_id_gen::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/task_store.rs | crates/rust-mcp-sdk/src/task_store.rs | mod in_memory_task_store;
use async_trait::async_trait;
use futures::Stream;
pub use in_memory_task_store::*;
use rust_mcp_schema::{
schema_utils::{
ClientJsonrpcRequest, ResultFromClient, ResultFromServer, ServerJsonrpcRequest,
},
ListTasksResult, RequestId, Task, TaskStatus, TaskStatusNotificationParams,
};
use std::{fmt::Debug, pin::Pin, sync::Arc};
use crate::error::SdkResult;
/// A stream of task status notifications, where each item contains the notification parameters
/// and an optional session_id identifying the session the notification belongs to.
pub type TaskStatusStream =
    Pin<Box<dyn Stream<Item = (TaskStatusNotificationParams, Option<String>)> + Send + 'static>>;
/// A publish/subscribe signal for task status change events.
#[async_trait]
pub trait TaskStatusSignal: Send + Sync + 'static {
    /// Publish a status change event, optionally scoped to a session
    async fn publish_status_change(
        &self,
        event: TaskStatusNotificationParams,
        session_id: Option<&String>,
    );
    /// Return a new independent stream of events.
    /// The default implementation returns `None` (no subscription support).
    fn subscribe(&self) -> Option<TaskStatusStream> {
        None
    }
}
/// Callback invoked with a task and an optional session id on status changes.
pub type TaskStatusCallback = Box<dyn Fn(&Task, Option<&String>) + Send + Sync + 'static>;
/// Options supplied when creating a new task.
pub struct CreateTaskOptions {
    /// Actual retention duration from creation in milliseconds, `None` for unlimited.
    pub ttl: Option<i64>,
    // `Option<i64>` for consistency with `ttl` (was the fully-qualified
    // `::std::option::Option<i64>`, likely generated).
    /// Poll interval suggested for status polling, if any.
    pub poll_interval: Option<i64>,
    /// Additional context to pass to the task store.
    pub meta: Option<serde_json::Map<String, serde_json::Value>>,
}
/// Bundles everything needed to create a task for a given request:
/// the request itself, its JSON-RPC id, an optional session id, and the
/// task store that will own the task.
pub struct TaskCreator<Req, Res>
where
    Req: Debug + Clone + serde::Deserialize<'static> + serde::Serialize,
    Res: Debug + Clone + serde::Deserialize<'static> + serde::Serialize,
{
    // JSON-RPC id of the request that triggers task creation.
    pub request_id: RequestId,
    // The original request to associate with the task.
    pub request: Req,
    // Optional session the task is bound to.
    pub session_id: Option<String>,
    // Store that will create and manage the task.
    pub task_store: Arc<dyn TaskStore<Req, Res>>,
}
impl<Req, Res> TaskCreator<Req, Res>
where
    Req: Debug + Clone + serde::Deserialize<'static> + serde::Serialize + 'static,
    Res: Debug + Clone + serde::Deserialize<'static> + serde::Serialize + 'static,
{
    /// Consumes the creator and creates the task in the underlying store,
    /// forwarding the stored request id, request, and session id.
    pub async fn create_task(self, task_params: CreateTaskOptions) -> Task {
        self.task_store
            .create_task(task_params, self.request_id, self.request, self.session_id)
            .await
    }
}
/// A trait for storing and managing long-running tasks, storing and retrieving task state and results.
/// Tasks were introduced in MCP Protocol version 2025-11-25.
/// For more details, see: <https://modelcontextprotocol.io/specification/2025-11-25/basic/utilities/tasks>
#[async_trait]
pub trait TaskStore<Req, Res>: Send + Sync + TaskStatusSignal
where
    Req: Debug + Clone + serde::Deserialize<'static> + serde::Serialize,
    Res: Debug + Clone + serde::Deserialize<'static> + serde::Serialize,
{
    /// Creates a new task with the given creation parameters and original request.
    /// The implementation must generate a unique taskId and createdAt timestamp.
    ///
    /// TTL Management:
    /// - The implementation receives the TTL suggested by the requestor via taskParams.ttl
    /// - The implementation MAY override the requested TTL (e.g., to enforce limits)
    /// - The actual TTL used MUST be returned in the Task object
    /// - Null TTL indicates unlimited task lifetime (no automatic cleanup)
    /// - Cleanup SHOULD occur automatically after TTL expires, regardless of task status
    ///
    /// # Arguments
    /// * `task_params` - The task creation parameters from the request (ttl, pollInterval)
    /// * `request_id` - The JSON-RPC request ID
    /// * `request` - The original request that triggered task creation
    /// * `session_id` - Optional session ID for binding the task to a specific session
    ///
    /// # Returns
    /// The created task object
    async fn create_task(
        &self,
        task_params: CreateTaskOptions,
        request_id: RequestId,
        request: Req,
        session_id: Option<String>,
    ) -> Task;
    /// Begins active polling for task status updates in requestor mode.
    /// This method spawns a long-running background task that drives the polling
    /// schedule for all tasks managed by this store. It repeatedly invokes the
    /// provided `get_task_callback` to query the **receiver** for the current status
    /// of pending tasks.
    ///
    /// The polling loop should respect the `pollInterval` suggested by the receiver and
    /// dynamically adjusts accordingly. Each task is polled until it reaches a
    /// terminal state (`Completed`, `Failed`, or `Cancelled`), at which point it
    /// is removed from the active polling schedule.
    ///
    /// This mechanism is used when the local side acts as the **requestor** in the
    /// Model Context Protocol task flow — i.e., when a task-augmented request has
    /// been sent to the remote side (the receiver) and the local side needs to
    /// actively monitor progress via repeated `tasks/get` calls.
    fn start_task_polling(&self, get_task_callback: TaskStatusPoller) -> SdkResult<()>;
    /// Waits asynchronously for the result of a task.
    ///
    /// # Arguments
    ///
    /// * `task_id` - The unique identifier of the task whose result is awaited.
    /// * `session_id` - Optional session identifier used to disambiguate or scope the task.
    ///
    /// # Returns
    ///
    /// * `Ok((status, result))` if the task completes and sends its result.
    /// * `Err(McpSdkError)` if:
    ///   - the task does not exist,
    ///   - the task result channel is dropped before sending,
    ///   - or an internal error occurs.
    ///
    /// # Errors
    ///
    /// Returns an internal RPC error if the task does not exist or if the sender
    /// side of the oneshot channel is dropped before producing a result.
    async fn wait_for_task_result(
        &self,
        task_id: &str,
        session_id: Option<String>,
    ) -> SdkResult<(TaskStatus, Option<Res>)>;
    /// Gets the current status of a task.
    ///
    /// # Arguments
    /// * `task_id` - The task identifier
    /// * `session_id` - Optional session ID for binding the query to a specific session
    ///
    /// # Returns
    /// The task object, or None if it does not exist
    async fn get_task(&self, task_id: &str, session_id: Option<String>) -> Option<Task>;
    /// Stores the result of a task and sets its final status.
    ///
    /// # Arguments
    /// * `task_id` - The task identifier
    /// * `status` - The final status: 'completed' for success, 'failed' for errors
    /// * `result` - The result to store
    /// * `session_id` - Optional session ID for binding the operation to a specific session
    //
    // NOTE(review): takes `Option<&String>` unlike the sibling methods' `Option<String>` —
    // confirm whether this asymmetry is intentional before unifying.
    // (Removed the redundant `-> ()` return annotation; clippy `unused_unit`.)
    async fn store_task_result(
        &self,
        task_id: &str,
        status: TaskStatus,
        result: Res,
        session_id: Option<&String>,
    );
    /// Retrieves the stored result of a task.
    ///
    /// # Arguments
    /// * `task_id` - The task identifier
    /// * `session_id` - Optional session ID for binding the query to a specific session
    ///
    /// # Returns
    /// The stored result
    async fn get_task_result(&self, task_id: &str, session_id: Option<String>) -> Option<Res>;
    /// Updates a task's status (e.g., to 'cancelled', 'failed', 'completed').
    ///
    /// # Arguments
    /// * `task_id` - The task identifier
    /// * `status` - The new status
    /// * `status_message` - Optional diagnostic message for failed tasks or other status information
    /// * `session_id` - Optional session ID for binding the operation to a specific session
    // (Removed the redundant `-> ()` return annotation; clippy `unused_unit`.)
    async fn update_task_status(
        &self,
        task_id: &str,
        status: TaskStatus,
        status_message: Option<String>,
        session_id: Option<String>,
    );
    /// Lists tasks, optionally starting from a pagination cursor.
    ///
    /// # Arguments
    /// * `cursor` - Optional cursor for pagination
    /// * `session_id` - Optional session ID for binding the query to a specific session
    ///
    /// # Returns
    /// An object containing the tasks array and an optional nextCursor
    async fn list_tasks(
        &self,
        cursor: Option<String>,
        session_id: Option<String>,
    ) -> ListTasksResult;
}
/// Task creator used on the server side (client requests, server results).
pub type ServerTaskCreator = TaskCreator<ClientJsonrpcRequest, ResultFromServer>;
/// Task creator used on the client side (server requests, client results).
pub type ClientTaskCreator = TaskCreator<ServerJsonrpcRequest, ResultFromClient>;
/// Trait-object alias for a server-side task store.
pub type ServerTaskStore = dyn TaskStore<ClientJsonrpcRequest, ResultFromServer>;
/// Trait-object alias for a client-side task store.
pub type ClientTaskStore = dyn TaskStore<ServerJsonrpcRequest, ResultFromClient>;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/error.rs | crates/rust-mcp-sdk/src/error.rs | #[cfg(feature = "auth")]
use crate::auth::AuthenticationError;
use crate::schema::{ParseProtocolVersionError, RpcError};
use rust_mcp_transport::error::TransportError;
use thiserror::Error;
use tokio::task::JoinError;
#[cfg(feature = "hyper-server")]
use crate::hyper_servers::error::TransportServerError;
pub type SdkResult<T> = core::result::Result<T, McpSdkError>;
/// Top-level error type for the SDK; wraps errors from the transport,
/// I/O, RPC, task-join, server, and authentication layers.
#[derive(Debug, Error)]
pub enum McpSdkError {
    /// Error originating from the transport layer.
    #[error("Transport error: {0}")]
    Transport(#[from] TransportError),
    /// Underlying I/O failure.
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
    /// JSON-RPC level error.
    #[error("{0}")]
    RpcError(#[from] RpcError),
    /// A spawned task failed to join.
    #[error("{0}")]
    Join(#[from] JoinError),
    /// Error from the hyper-based transport server (feature-gated).
    #[cfg(feature = "hyper-server")]
    #[error("{0}")]
    HyperServer(#[from] TransportServerError),
    /// Authentication failure (feature-gated).
    #[cfg(feature = "auth")]
    #[error("{0}")]
    AuthenticationError(#[from] AuthenticationError),
    /// Error defined by the MCP schema utilities.
    #[error("{0}")]
    SdkError(#[from] crate::schema::schema_utils::SdkError),
    /// Protocol-level failure; see [`ProtocolErrorKind`].
    #[error("Protocol error: {kind}")]
    Protocol { kind: ProtocolErrorKind },
    /// Catch-all internal error with a human-readable description.
    #[error("Server error: {description}")]
    Internal { description: String },
}
/// Sub-enum for protocol-related errors, carried by [`McpSdkError::Protocol`].
#[derive(Debug, Error)]
pub enum ProtocolErrorKind {
    /// The requested protocol version does not match the version in use.
    #[error("Incompatible protocol version: requested {requested}, current {current}")]
    IncompatibleVersion { requested: String, current: String },
    /// The protocol version string could not be parsed.
    #[error("Failed to parse protocol version: {0}")]
    ParseError(#[from] ParseProtocolVersionError),
}
impl McpSdkError {
    /// Returns the RPC error message if the error is of type `McpSdkError::RpcError`.
    pub fn rpc_error_message(&self) -> Option<&String> {
        match self {
            McpSdkError::RpcError(rpc_error) => Some(&rpc_error.message),
            _ => None,
        }
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/schema.rs | crates/rust-mcp-sdk/src/schema.rs | pub use rust_mcp_schema::mcp_2025_11_25::*;
// always export
pub use rust_mcp_schema::{ParseProtocolVersionError, ProtocolVersion};
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth.rs | crates/rust-mcp-sdk/src/auth.rs | mod auth_info;
#[cfg(feature = "auth")]
mod auth_provider;
#[cfg(feature = "auth")]
mod error;
#[cfg(feature = "auth")]
mod metadata;
mod spec;
#[cfg(feature = "auth")]
mod token_verifier;
pub use auth_info::AuthInfo;
#[cfg(feature = "auth")]
pub use auth_provider::*;
#[cfg(feature = "auth")]
pub use error::*;
#[cfg(feature = "auth")]
pub use metadata::*;
pub use spec::Audience;
#[cfg(feature = "auth")]
pub use spec::*;
#[cfg(feature = "auth")]
pub use token_verifier::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/utils.rs | crates/rust-mcp-sdk/src/utils.rs | use crate::error::{McpSdkError, ProtocolErrorKind, SdkResult};
use crate::schema::{ClientMessages, ProtocolVersion, SdkError};
use std::cmp::Ordering;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use time::format_description::well_known::Iso8601;
use time::OffsetDateTime;
#[cfg(feature = "auth")]
use url::Url;
/// A guard type that automatically aborts a Tokio task when dropped.
///
/// This ensures that the associated task does not outlive the scope
/// of this struct, preventing runaway or leaked background tasks.
///
/// Note: aborting is best-effort; the task stops at its next yield point.
pub struct AbortTaskOnDrop {
    /// The handle used to abort the spawned Tokio task.
    pub handle: tokio::task::AbortHandle,
}
impl Drop for AbortTaskOnDrop {
    fn drop(&mut self) {
        // Automatically abort the associated task when this guard is dropped.
        // `AbortHandle::abort` is idempotent, so dropping after a manual abort is safe.
        self.handle.abort();
    }
}
/// Converts a Unix timestamp (whole seconds since the epoch) into a [`SystemTime`].
pub fn unix_timestamp_to_systemtime(timestamp: u64) -> SystemTime {
    let offset = Duration::new(timestamp, 0);
    UNIX_EPOCH + offset
}
/// Checks if the client and server protocol versions are compatible by ensuring they are equal.
///
/// Returns `Ok(())` when the two versions match exactly; otherwise returns a
/// [`McpSdkError::Protocol`] with [`ProtocolErrorKind::IncompatibleVersion`]
/// carrying both version strings.
///
/// # Arguments
///
/// * `client_protocol_version` - A string slice representing the client's protocol version.
/// * `server_protocol_version` - A string slice representing the server's protocol version.
///
/// # Examples
///
/// ```
/// use rust_mcp_sdk::mcp_client::ensure_server_protocole_compatibility;
/// use rust_mcp_sdk::error::McpSdkError;
///
/// // Compatible versions
/// let result = ensure_server_protocole_compatibility("2024_11_05", "2024_11_05");
/// assert!(result.is_ok());
///
/// // Incompatible versions (requested < current)
/// let result = ensure_server_protocole_compatibility("2024_11_05", "2025_03_26");
/// assert!(matches!(
///     result,
///     Err(McpSdkError::Protocol{kind: rust_mcp_sdk::error::ProtocolErrorKind::IncompatibleVersion {requested, current}})
///     if requested == "2024_11_05" && current == "2025_03_26"
/// ));
///
/// // Incompatible versions (requested > current)
/// let result = ensure_server_protocole_compatibility("2025_03_26", "2024_11_05");
/// assert!(matches!(
///     result,
///     Err(McpSdkError::Protocol{kind: rust_mcp_sdk::error::ProtocolErrorKind::IncompatibleVersion {requested, current}})
///     if requested == "2025_03_26" && current == "2024_11_05"
/// ));
/// ```
#[allow(unused)]
pub fn ensure_server_protocole_compatibility(
    client_protocol_version: &str,
    server_protocol_version: &str,
) -> SdkResult<()> {
    // Any mismatch (higher or lower) is incompatible; only exact equality passes.
    if client_protocol_version == server_protocol_version {
        Ok(())
    } else {
        Err(McpSdkError::Protocol {
            kind: ProtocolErrorKind::IncompatibleVersion {
                requested: client_protocol_version.to_string(),
                current: server_protocol_version.to_string(),
            },
        })
    }
}
/// Enforces protocol version compatibility for an MCP Server, allowing the client to use a lower or equal version.
///
/// # Arguments
///
/// * `client_protocol_version` - The client's protocol version.
/// * `server_protocol_version` - The server's protocol version.
///
/// # Returns
///
/// * `Ok(None)` if the versions are equal (no downgrade needed).
/// * `Ok(Some(client_protocol_version))` if the client version is lower; the server
///   can echo the client's version back for compatibility.
/// * `Err(McpSdkError::Protocol { .. })` if the client version is higher than the
///   server's, containing both versions as strings.
///
/// # Examples
///
/// ```
/// use rust_mcp_sdk::mcp_server::enforce_compatible_protocol_version;
/// use rust_mcp_sdk::error::McpSdkError;
///
/// // Equal versions
/// let result = enforce_compatible_protocol_version("2024_11_05", "2024_11_05");
/// assert!(matches!(result, Ok(None)));
///
/// // Client version lower (downgrade allowed)
/// let result = enforce_compatible_protocol_version("2024_11_05", "2025_03_26");
/// assert!(matches!(result, Ok(Some(ref v)) if v == "2024_11_05"));
///
/// // Client version higher (incompatible)
/// let result = enforce_compatible_protocol_version("2025_03_26", "2024_11_05");
/// assert!(matches!(
///     result,
///     Err(McpSdkError::Protocol{kind: rust_mcp_sdk::error::ProtocolErrorKind::IncompatibleVersion {requested, current}})
///     if requested == "2025_03_26" && current == "2024_11_05"
/// ));
/// ```
#[allow(unused)]
pub fn enforce_compatible_protocol_version(
    client_protocol_version: &str,
    server_protocol_version: &str,
) -> SdkResult<Option<String>> {
    // Lazily builds the incompatibility error so the strings are only
    // allocated on the error path.
    let incompatible = || McpSdkError::Protocol {
        kind: ProtocolErrorKind::IncompatibleVersion {
            requested: client_protocol_version.to_string(),
            current: server_protocol_version.to_string(),
        },
    };
    match client_protocol_version.cmp(server_protocol_version) {
        Ordering::Equal => Ok(None),
        // A newer client cannot be served by an older server.
        Ordering::Greater => Err(incompatible()),
        // An older client is acceptable: echo its version back so the
        // server can downgrade the handshake.
        Ordering::Less => Ok(Some(client_protocol_version.to_string())),
    }
}
/// Validates that `mcp_protocol_version` parses as a known [`ProtocolVersion`].
///
/// # Errors
/// Returns [`McpSdkError::Protocol`] with [`ProtocolErrorKind::ParseError`]
/// when the string is not a recognized protocol version.
pub fn validate_mcp_protocol_version(mcp_protocol_version: &str) -> SdkResult<()> {
    // The parsed value itself is not needed — only whether parsing succeeds —
    // so map it away instead of binding an unused local.
    ProtocolVersion::try_from(mcp_protocol_version)
        .map(|_| ())
        .map_err(|err| McpSdkError::Protocol {
            kind: ProtocolErrorKind::ParseError(err),
        })
}
/// Removes query string and hash fragment from a URL, returning the base path.
///
/// # Arguments
/// * `endpoint` - The URL or endpoint to process (e.g., "/messages?foo=bar#section1")
///
/// # Returns
/// A `String` containing the base path without query parameters or fragment.
/// An empty input (or one that reduces to empty) yields "/".
#[allow(unused)]
pub(crate) fn remove_query_and_hash(endpoint: &str) -> String {
    // Drop the fragment first (anything after '#') ...
    let without_fragment = endpoint.split_once('#').map_or(endpoint, |(path, _)| path);
    // ... then drop the query string (anything after '?').
    let without_query = without_fragment
        .split_once('?')
        .map_or(without_fragment, |(path, _)| path);
    // Normalize an empty result to the root path.
    if without_query.is_empty() {
        "/".to_string()
    } else {
        without_query.to_string()
    }
}
/// Checks if the input string is valid JSON and represents an "initialize" method request.
///
/// A single message must be an initialize request; a batch may contain at most
/// one initialize request.
pub fn valid_initialize_method(json_str: &str) -> SdkResult<()> {
    // Reject anything that does not deserialize into a client message at all.
    let parsed: Result<ClientMessages, _> = serde_json::from_str(json_str);
    let Ok(request) = parsed else {
        return Err(SdkError::bad_request()
            .with_message("Bad Request: Session not found")
            .into());
    };
    match request {
        // A single message that is not an initialize request is rejected.
        ClientMessages::Single(message) if !message.is_initialize_request() => Err(
            SdkError::bad_request()
                .with_message("Bad Request: Session not found")
                .into(),
        ),
        // Batches may carry at most one initialize request.
        ClientMessages::Batch(messages) => {
            let init_count = messages
                .iter()
                .filter(|item| item.is_initialize_request())
                .count();
            if init_count > 1 {
                return Err(SdkError::invalid_request()
                    .with_message("Bad Request: Only one initialization request is allowed")
                    .into());
            }
            Ok(())
        }
        _ => Ok(()),
    }
}
/// Returns the current UTC time, optionally adjusted by a millisecond offset.
///
/// Positive offsets move the time into the future, negative offsets into the past.
///
/// If applying the offset would overflow the valid `OffsetDateTime` range, a
/// fixed 3-minute nudge in the requested direction is attempted instead; if
/// even that overflows, the unadjusted current time is returned.
pub fn current_utc_time(ms_offset: Option<i64>) -> OffsetDateTime {
    let now = OffsetDateTime::now_utc();
    let Some(ms) = ms_offset else {
        return now;
    };
    let delta = time::Duration::milliseconds(ms);
    now.checked_add(delta).unwrap_or_else(|| {
        // Requested offset overflowed: fall back to a fixed 3-minute window
        // in the same direction, keeping `now` as the last resort.
        let fallback = time::Duration::milliseconds(180_000);
        let adjusted = if ms > 0 {
            now.checked_add(fallback)
        } else {
            now.checked_sub(fallback)
        };
        adjusted.unwrap_or(now)
    })
}
/// Formats an `OffsetDateTime` as an ISO 8601 string.
///
/// Uses the default ISO 8601 configuration (nanosecond precision, `Z` suffix).
/// If formatting fails for any reason (extremely unlikely), an empty string is
/// returned as a fallback.
pub fn iso8601_time(time_value: OffsetDateTime) -> String {
    match time_value.format(&Iso8601::DEFAULT) {
        Ok(formatted) => formatted,
        Err(_) => String::new(),
    }
}
#[cfg(feature = "auth")]
/// Joins `segment` onto `base`'s path, treating the base path as a directory.
///
/// Leading slashes and empty components in `segment` are ignored. Returns
/// `RelativeUrlWithoutBase` when `base` cannot act as a base URL.
pub fn join_url(base: &Url, segment: &str) -> Result<Url, url::ParseError> {
    // Path segments can only be manipulated on an absolute ("base-able") URL.
    if base.cannot_be_a_base() {
        return Err(url::ParseError::RelativeUrlWithoutBase);
    }
    // A clone is unavoidable when taking `&Url`.
    let mut joined = base.clone();
    {
        let mut path = joined
            .path_segments_mut()
            .map_err(|_| url::ParseError::RelativeUrlWithoutBase)?;
        // Drop a trailing empty segment so the base path acts like a directory,
        // then append every non-empty component of `segment`.
        path.pop_if_empty();
        for part in segment.split('/').filter(|s| !s.is_empty()) {
            path.push(part);
        }
    }
    Ok(joined)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Query strings and fragments are stripped from endpoint paths.
    #[test]
    fn test_remove_query_and_hash() {
        assert_eq!(remove_query_and_hash("/messages"), "/messages");
        assert_eq!(
            remove_query_and_hash("/messages?foo=bar&baz=qux"),
            "/messages"
        );
        assert_eq!(remove_query_and_hash("/messages#section1"), "/messages");
        assert_eq!(
            remove_query_and_hash("/messages?key=value#section2"),
            "/messages"
        );
        assert_eq!(remove_query_and_hash("/"), "/");
    }

    /// `join_url` normalizes leading/trailing slashes while joining.
    ///
    /// Gated on `auth` because both `join_url` and the `url::Url` import only
    /// exist with that feature enabled; without the gate, `cargo test` fails
    /// to compile when the `auth` feature is off.
    #[cfg(feature = "auth")]
    #[test]
    fn test_join_url() {
        let expect = "http://example.com/api/user/userinfo";
        let result = join_url(
            &Url::parse("http://example.com/api").unwrap(),
            "/user/userinfo",
        )
        .unwrap();
        assert_eq!(result.to_string(), expect);
        let result = join_url(
            &Url::parse("http://example.com/api").unwrap(),
            "user/userinfo",
        )
        .unwrap();
        assert_eq!(result.to_string(), expect);
        let result = join_url(
            &Url::parse("http://example.com/api/").unwrap(),
            "/user/userinfo",
        )
        .unwrap();
        assert_eq!(result.to_string(), expect);
        let result = join_url(
            &Url::parse("http://example.com/api/").unwrap(),
            "user/userinfo",
        )
        .unwrap();
        assert_eq!(result.to_string(), expect);
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes.rs | crates/rust-mcp-sdk/src/mcp_runtimes.rs | #[cfg(feature = "client")]
pub mod client_runtime;
#[cfg(feature = "server")]
pub mod server_runtime;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_http.rs | crates/rust-mcp-sdk/src/mcp_http.rs | mod app_state;
pub(crate) mod http_utils;
mod mcp_http_handler;
pub mod middleware;
mod types;
pub use app_state::*;
pub use http_utils::*;
pub use mcp_http_handler::*;
pub use types::*;
pub use middleware::Middleware;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_handlers.rs | crates/rust-mcp-sdk/src/mcp_handlers.rs | #[cfg(feature = "client")]
pub mod mcp_client_handler;
#[cfg(feature = "client")]
pub mod mcp_client_handler_core;
#[cfg(feature = "server")]
pub mod mcp_server_handler;
#[cfg(feature = "server")]
pub mod mcp_server_handler_core;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/id_generator.rs | crates/rust-mcp-sdk/src/id_generator.rs | mod fast_id_generator;
mod uuid_generator;
pub use crate::mcp_traits::IdGenerator;
pub use fast_id_generator::*;
pub use uuid_generator::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_macros.rs | crates/rust-mcp-sdk/src/mcp_macros.rs | pub mod mcp_icon;
pub mod tool_box;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/session_store/in_memory_session_store.rs | crates/rust-mcp-sdk/src/session_store/in_memory_session_store.rs | use crate::mcp_server::ServerRuntime;
use super::SessionId;
use super::SessionStore;
use async_trait::async_trait;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
/// In-memory session store implementation
///
/// Stores session data in a thread-safe HashMap, using a read-write lock so
/// that many concurrent readers can proceed while writers are serialized.
#[derive(Clone, Default)]
pub struct InMemorySessionStore {
    // Session-id -> runtime map; shared across clones via `Arc` and guarded
    // by an async `RwLock`.
    store: Arc<RwLock<HashMap<String, Arc<ServerRuntime>>>>,
}
impl InMemorySessionStore {
/// Creates a new in-memory session store
///
/// Initializes an empty HashMap wrapped in a read-write lock for thread-safe access.
///
/// # Returns
/// * `Self` - A new InMemorySessionStore instance
pub fn new() -> Self {
Self {
store: Arc::new(RwLock::new(HashMap::new())),
}
}
}
/// Implementation of the SessionStore trait for InMemorySessionStore
///
/// Provides asynchronous methods for managing sessions in memory; every
/// operation acquires the read or write half of the internal lock as needed,
/// holding it only for the duration of the single map operation.
#[async_trait]
impl SessionStore for InMemorySessionStore {
    async fn get(&self, key: &SessionId) -> Option<Arc<ServerRuntime>> {
        self.store.read().await.get(key).cloned()
    }
    async fn set(&self, key: SessionId, value: Arc<ServerRuntime>) {
        self.store.write().await.insert(key, value);
    }
    async fn delete(&self, key: &SessionId) {
        self.store.write().await.remove(key);
    }
    async fn clear(&self) {
        self.store.write().await.clear();
    }
    async fn keys(&self) -> Vec<SessionId> {
        self.store.read().await.keys().cloned().collect()
    }
    async fn values(&self) -> Vec<Arc<ServerRuntime>> {
        self.store.read().await.values().cloned().collect()
    }
    async fn has(&self, session: &SessionId) -> bool {
        self.store.read().await.contains_key(session)
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/auth_info.rs | crates/rust-mcp-sdk/src/auth/auth_info.rs | #[cfg(feature = "auth")]
use crate::auth::{AuthClaims, AuthenticationError, IntrospectionResponse};
use crate::{auth::Audience, utils::unix_timestamp_to_systemtime};
#[cfg(feature = "auth")]
use jsonwebtoken::TokenData;
use serde::{Deserialize, Serialize};
use serde_json::Map;
use std::time::SystemTime;
/// Information about a validated access token, provided to request handlers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthInfo {
    /// Contains a unique id for the token:
    /// the `jti` claim if available, otherwise the raw token (or a reliable hash of it).
    pub token_unique_id: String,
    /// The client ID associated with this token.
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub client_id: Option<String>,
    /// Optional user identifier for the token
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub user_id: Option<String>,
    /// Scopes associated with this token.
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub scopes: Option<Vec<String>>,
    /// When the token expires, as a [`SystemTime`] derived from the token's
    /// expiration claim. Optional, as the token may not have an expiration time.
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub expires_at: Option<SystemTime>,
    /// The RFC 8707 resource server identifier for which this token is valid.
    /// If set, this MUST match the MCP server's resource identifier (minus hash fragment).
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    pub audience: Option<Audience>,
    /// Additional data associated with the token.
    /// This field can be used to attach any extra data to the auth info.
    #[serde(flatten, skip_serializing_if = "std::option::Option::is_none")]
    pub extra: Option<Map<String, serde_json::Value>>,
}
#[cfg(feature = "auth")]
impl AuthInfo {
    /// Builds an [`AuthInfo`] from decoded JWT token data.
    ///
    /// The client id is resolved from `authorized_party`, then `client_id`,
    /// then `application_id`; the unique id prefers the `jwt_id` claim,
    /// falling back to the raw token string.
    pub fn from_token_data(
        token: String,
        token_data: TokenData<AuthClaims>,
        extra: Option<Map<String, serde_json::Value>>,
    ) -> Result<Self, AuthenticationError> {
        let claims = token_data.claims;
        let client_id = claims
            .authorized_party
            .or(claims.client_id)
            .or(claims.application_id);
        // Scope claims are space-delimited per the OAuth convention.
        let scopes = claims
            .scope
            .map(|s| s.split(' ').map(str::to_string).collect::<Vec<_>>());
        let expires_at = claims
            .expiration
            .map(|v| unix_timestamp_to_systemtime(v as u64));
        Ok(AuthInfo {
            token_unique_id: claims.jwt_id.unwrap_or(token),
            client_id,
            scopes,
            user_id: claims.subject,
            expires_at,
            audience: claims.audience,
            extra,
        })
    }
    /// Builds an [`AuthInfo`] from an OAuth token-introspection response.
    ///
    /// Mirrors [`AuthInfo::from_token_data`], but sources every field directly
    /// from the introspection payload.
    pub fn from_introspection_response(
        token: String,
        data: IntrospectionResponse,
        extra: Option<Map<String, serde_json::Value>>,
    ) -> Result<Self, AuthenticationError> {
        let scopes = data
            .scope
            .map(|s| s.split(' ').map(str::to_string).collect::<Vec<_>>());
        let expires_at = data
            .expiration
            .map(|v| unix_timestamp_to_systemtime(v as u64));
        Ok(AuthInfo {
            token_unique_id: data.jwt_id.unwrap_or(token),
            client_id: data.client_id,
            user_id: data.subject,
            scopes,
            expires_at,
            audience: data.audience,
            extra,
        })
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/auth_provider.rs | crates/rust-mcp-sdk/src/auth/auth_provider.rs | mod remote_auth_provider;
use crate::auth::OauthEndpoint;
use crate::auth::{AuthInfo, AuthenticationError};
use crate::mcp_http::{GenericBody, GenericBodyExt, McpAppState};
use crate::mcp_server::error::TransportServerError;
use async_trait::async_trait;
use http::Method;
pub use remote_auth_provider::*;
use std::collections::HashMap;
use std::sync::Arc;
/// Pluggable OAuth authentication provider for the MCP HTTP server.
///
/// Implementors verify bearer tokens and may additionally serve OAuth
/// endpoints (authorization, token, registration, metadata, ...) themselves.
#[async_trait]
pub trait AuthProvider: Send + Sync {
    /// Verifies `access_token` and returns the associated [`AuthInfo`],
    /// or an [`AuthenticationError`] when verification fails.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError>;
    /// Returns an optional list of scopes required to access this resource.
    /// If this function returns `Some(scopes)`, the authenticated user’s token
    /// must include **all** of the listed scopes.
    /// If any are missing, the request will be rejected with a `403 Forbidden` response.
    fn required_scopes(&self) -> Option<&Vec<String>> {
        None
    }
    /// Returns the configured OAuth endpoints for this provider.
    ///
    /// - Key: endpoint path as a string (e.g., "/oauth/token")
    /// - Value: corresponding `OauthEndpoint` configuration
    ///
    /// Returns `None` if no endpoints are configured.
    fn auth_endpoints(&self) -> Option<&HashMap<String, OauthEndpoint>>;
    /// Handles an incoming HTTP request for this authentication provider.
    ///
    /// This is the main entry point for processing OAuth requests,
    /// such as token issuance, authorization code exchange, or revocation.
    async fn handle_request(
        &self,
        request: http::Request<&str>,
        state: Arc<McpAppState>,
    ) -> Result<http::Response<GenericBody>, TransportServerError>;
    /// Returns the `OauthEndpoint` associated with the given request path.
    ///
    /// This method looks up the request URI path in the endpoints returned by `auth_endpoints()`.
    ///
    /// ⚠️ Note:
    /// - If your token and revocation endpoints share the same URL path (valid in some implementations),
    ///   you may want to override this method to correctly distinguish the request type
    ///   (e.g., based on request parameters like `grant_type` vs `token`).
    fn endpoint_type(&self, request: &http::Request<&str>) -> Option<&OauthEndpoint> {
        let endpoints = self.auth_endpoints()?;
        endpoints.get(request.uri().path())
    }
    /// Returns the absolute URL of this resource's OAuth 2.0 Protected Resource Metadata document.
    ///
    /// This corresponds to the `resource_metadata` parameter defined in
    /// [RFC 9728 - OAuth 2.0 Protected Resource Metadata](https://datatracker.ietf.org/doc/html/rfc9728).
    ///
    /// The returned URL is an **absolute** URL (including scheme and host), for example:
    /// `https://api.example.com/.well-known/oauth-protected-resource`.
    ///
    fn protected_resource_metadata_url(&self) -> Option<&str>;
    /// Checks whether `method` is permitted for `endpoint`.
    ///
    /// Returns a prebuilt `405 Method Not Allowed` response (listing the
    /// allowed methods) when the method is not permitted, or `None` when it is.
    fn validate_allowed_methods(
        &self,
        endpoint: &OauthEndpoint,
        method: &Method,
    ) -> Option<http::Response<GenericBody>> {
        // Per-endpoint allow-lists of HTTP methods.
        let allowed_methods = match endpoint {
            OauthEndpoint::AuthorizationEndpoint => {
                vec![Method::GET, Method::HEAD, Method::OPTIONS]
            }
            OauthEndpoint::TokenEndpoint => vec![Method::POST, Method::OPTIONS],
            OauthEndpoint::RegistrationEndpoint => vec![
                Method::POST,
                Method::GET,
                Method::PUT,
                Method::PATCH,
                Method::DELETE,
                Method::OPTIONS,
            ],
            OauthEndpoint::RevocationEndpoint => vec![Method::POST, Method::OPTIONS],
            OauthEndpoint::IntrospectionEndpoint => vec![Method::POST, Method::OPTIONS],
            OauthEndpoint::AuthorizationServerMetadata => {
                vec![Method::GET, Method::HEAD, Method::OPTIONS]
            }
            OauthEndpoint::ProtectedResourceMetadata => {
                vec![Method::GET, Method::HEAD, Method::OPTIONS]
            }
        };
        if !allowed_methods.contains(method) {
            return Some(GenericBody::create_405_response(method, &allowed_methods));
        }
        None
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/error.rs | crates/rust-mcp-sdk/src/auth/error.rs | use serde::Serialize;
use serde_json::{json, Value};
use thiserror::Error;
#[derive(Debug, Error, Clone, Serialize)]
#[serde(tag = "error", rename_all = "snake_case")]
pub enum AuthenticationError {
#[error("No token verification endpoint available in metadata.")]
NoIntrospectionEndpoint,
#[error("failed to retrieve JWKS from the authorization server : {0}")]
Jwks(String),
#[error("{description}")]
InvalidToken { description: &'static str },
#[error("Inactive Token")]
InactiveToken,
#[error("Resource indicator (aud) missing.")]
AudiencesAttributeMissing,
#[error(
"Insufficient scope: you do not have the necessary permissions to perform this action."
)]
InsufficientScope,
#[error("None of the provided audiences are allowed. Expected ${expected}, got: ${received}")]
AudienceNotAllowed { expected: String, received: String },
#[error("Invalid or expired token: {0}")]
InvalidOrExpiredToken(String),
#[error("{description}")]
TokenVerificationFailed {
description: String,
status_code: Option<u16>,
},
#[error("{description}")]
ServerError { description: String },
#[error("{0}")]
ParsingError(String),
#[error("{0}")]
NotFound(String),
}
impl AuthenticationError {
    /// Serializes the error as an OAuth-style JSON object with `error`
    /// (the snake_case code from the serde tag) and `error_description`
    /// (the human-readable `Display` text).
    pub fn as_json_value(&self) -> Value {
        // Pull the tagged error code out of the serialized form, falling back
        // to a generic code if serialization fails or the tag is absent.
        let error_name = serde_json::to_value(self)
            .ok()
            .and_then(|v| v.get("error").and_then(|e| e.as_str()).map(String::from))
            .unwrap_or_else(|| "unknown_error".to_string());
        json!({
            "error": error_name,
            "error_description": self.to_string()
        })
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/token_verifier.rs | crates/rust-mcp-sdk/src/auth/token_verifier.rs | use super::{AuthInfo, AuthenticationError};
use async_trait::async_trait;
/// Verifies OAuth access tokens, producing the associated [`AuthInfo`] on success.
#[async_trait]
pub trait OauthTokenVerifier: Send + Sync {
    /// Verifies `access_token`, returning its [`AuthInfo`] or an
    /// [`AuthenticationError`] when the token is invalid or cannot be checked.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError>;
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/metadata.rs | crates/rust-mcp-sdk/src/auth/metadata.rs | use std::borrow::Cow;
use crate::{
auth::{AuthorizationServerMetadata, OauthProtectedResourceMetadata},
error::McpSdkError,
utils::join_url,
};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use thiserror::Error;
use url::Url;
/// Well-known path for OAuth 2.0 Authorization Server Metadata (RFC 8414).
pub const WELL_KNOWN_OAUTH_AUTHORIZATION_SERVER: &str = "/.well-known/oauth-authorization-server";
/// Base well-known path for OAuth 2.0 Protected Resource Metadata; the
/// resource's own path may be appended to it (see `OauthMetadata::endpoints`).
pub const OAUTH_PROTECTED_RESOURCE_BASE: &str = "/.well-known/oauth-protected-resource";
/// Identifies the kind of OAuth endpoint a configured path serves.
#[allow(unused)]
#[derive(Hash, Eq, PartialEq, Clone)]
pub enum OauthEndpoint {
    AuthorizationEndpoint,
    TokenEndpoint,
    RegistrationEndpoint,
    RevocationEndpoint,
    IntrospectionEndpoint,
    AuthorizationServerMetadata,
    ProtectedResourceMetadata,
}
/// Errors produced while constructing auth metadata.
///
/// NOTE(review): the type name contains a typo ("Metadate") and the variant is
/// named `Transport` although it wraps a URL parse error; both are public API,
/// so renaming would be a breaking change — consider a deprecation alias.
#[derive(Debug, Error)]
pub enum AuthMetadateError {
    /// URL parsing failed while building endpoint URLs.
    #[error("Url Parse Error: {0}")]
    Transport(#[from] url::ParseError),
}
/// Well-known endpoint paths at which the two OAuth metadata documents are served.
pub struct AuthMetadataEndpoints {
    /// Path of the protected-resource metadata document.
    pub protected_resource_endpoint: String,
    /// Path of the authorization-server metadata document.
    pub authorization_server_endpoint: String,
}
/// Builder struct to construct both OAuthMetadata and OAuthProtectedResourceMetadata.
///
/// All fields are optional; borrowed values are held as `Cow` so the builder
/// can be populated from either string literals or owned strings.
#[derive(Default)]
pub struct AuthMetadataBuilder<'a> {
    // OAuthMetadata-specific fields
    issuer: Option<Cow<'a, str>>,
    authorization_endpoint: Option<Cow<'a, str>>,
    token_endpoint: Option<Cow<'a, str>>,
    registration_endpoint: Option<Cow<'a, str>>,
    revocation_endpoint: Option<Cow<'a, str>>,
    introspection_endpoint: Option<Cow<'a, str>>,
    scopes_supported: Option<Vec<Cow<'a, str>>>,
    response_types_supported: Option<Vec<Cow<'a, str>>>,
    response_modes_supported: Option<Vec<Cow<'a, str>>>,
    grant_types_supported: Option<Vec<Cow<'a, str>>>,
    token_endpoint_auth_methods_supported: Option<Vec<Cow<'a, str>>>,
    token_endpoint_auth_signing_alg_values_supported: Option<Vec<Cow<'a, str>>>,
    revocation_endpoint_auth_signing_alg_values_supported: Option<Vec<Cow<'a, str>>>,
    revocation_endpoint_auth_methods_supported: Option<Vec<Cow<'a, str>>>,
    introspection_endpoint_auth_methods_supported: Option<Vec<Cow<'a, str>>>,
    introspection_endpoint_auth_signing_alg_values_supported: Option<Vec<Cow<'a, str>>>,
    code_challenge_methods_supported: Option<Vec<Cow<'a, str>>>,
    service_documentation: Option<Cow<'a, str>>,
    // OAuthProtectedResourceMetadata-specific fields
    resource: Option<Cow<'a, str>>,
    authorization_servers: Option<Vec<Cow<'a, str>>>,
    required_scopes: Option<Vec<Cow<'a, str>>>,
    jwks_uri: Option<Cow<'a, str>>,
    bearer_methods_supported: Option<Vec<Cow<'a, str>>>,
    resource_signing_alg_values_supported: Option<Vec<Cow<'a, str>>>,
    resource_name: Option<Cow<'a, str>>,
    resource_documentation: Option<Cow<'a, str>>,
    resource_policy_uri: Option<Cow<'a, str>>,
    resource_tos_uri: Option<Cow<'a, str>>,
    tls_client_certificate_bound_access_tokens: Option<bool>,
    authorization_details_types_supported: Option<Vec<Cow<'a, str>>>,
    dpop_signing_alg_values_supported: Option<Vec<Cow<'a, str>>>,
    dpop_bound_access_tokens_required: Option<bool>,
    // Non-standard field (e.g. OpenID Connect `userinfo_endpoint`).
    userinfo_endpoint: Option<Cow<'a, str>>,
}
// Result struct to hold both metadata types produced by the builder.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct OauthMetadata {
    // Metadata describing the authorization server.
    authorization_server_metadata: AuthorizationServerMetadata,
    // Metadata describing this protected resource (the MCP server).
    protected_resource_metadata: OauthProtectedResourceMetadata,
}
impl OauthMetadata {
    /// Returns the protected-resource metadata document.
    pub fn protected_resource_metadata(&self) -> &OauthProtectedResourceMetadata {
        &self.protected_resource_metadata
    }
    /// Returns the authorization-server metadata document.
    pub fn authorization_server_metadata(&self) -> &AuthorizationServerMetadata {
        &self.authorization_server_metadata
    }
    /// Computes the well-known endpoint paths for both metadata documents.
    ///
    /// The protected-resource endpoint appends the resource URL's path to the
    /// well-known base, except when that path is just "/" (root), in which
    /// case nothing is appended.
    pub fn endpoints(&self) -> AuthMetadataEndpoints {
        let resource_path = self.protected_resource_metadata.resource.path();
        let suffix = if resource_path == "/" { "" } else { resource_path };
        AuthMetadataEndpoints {
            protected_resource_endpoint: format!("{OAUTH_PROTECTED_RESOURCE_BASE}{suffix}"),
            authorization_server_endpoint: WELL_KNOWN_OAUTH_AUTHORIZATION_SERVER.to_string(),
        }
    }
}
impl<'a> AuthMetadataBuilder<'a> {
fn with_defaults(protected_resource: &'a str) -> Self {
Self {
response_types_supported: Some(vec!["code".into()]),
code_challenge_methods_supported: Some(vec!["S256".into()]),
token_endpoint_auth_methods_supported: Some(vec!["client_secret_post".into()]),
grant_types_supported: Some(vec!["authorization_code".into(), "refresh_token".into()]),
resource: Some(protected_resource.into()),
..Default::default()
}
}
    /// Creates a new instance of the builder for the given protected resource.
    /// The `protected_resource_url` parameter must specify the full URL of the
    /// MCP server; common OAuth defaults are pre-populated (see `with_defaults`).
    pub fn new(protected_resource_url: &'a str) -> Self {
        Self::with_defaults(protected_resource_url)
    }
pub async fn from_discovery_url<S>(
discovery_url: &str,
protected_resource: S,
required_scopes: Vec<S>,
) -> Result<Self, McpSdkError>
where
S: Into<Cow<'a, str>>,
{
let client = Client::new();
let json: Value = client
.get(discovery_url)
.send()
.await
.map_err(|e| McpSdkError::Internal {
description: format!(
"Failed to fetch discovery document : \"{discovery_url}\": {e}"
),
})?
.error_for_status()
.map_err(|e| McpSdkError::Internal {
description: format!("Discovery endpoint returned error: {e}"),
})?
.json()
.await
.map_err(|e| McpSdkError::Internal {
description: format!("Failed to parse JSON from discovery document: {e}"),
})?;
// Helper to extract string field safely
let get_str = |key: &str| {
json.get(key)
.and_then(|v| v.as_str())
.map(|s| Cow::<str>::Owned(s.to_string()))
};
// Helper for optional array of strings
let get_str_array = |key: &str| {
json.get(key).and_then(|v| v.as_array()).map(|arr| {
arr.iter()
.filter_map(|item| item.as_str())
.filter(|v| !v.is_empty())
.map(|s| Cow::<str>::Owned(s.to_string()))
.collect::<Vec<_>>()
})
};
let issuer = get_str("issuer").ok_or_else(|| McpSdkError::Internal {
description: "Missing 'issuer' in discovery document".to_string(),
})?;
Ok(Self {
issuer: Some(issuer.clone()),
authorization_endpoint: get_str("authorization_endpoint"),
scopes_supported: get_str_array("scopes_supported"),
required_scopes: Some(required_scopes.into_iter().map(|s| s.into()).collect()),
token_endpoint: get_str("token_endpoint"),
jwks_uri: get_str("jwks_uri"),
userinfo_endpoint: get_str("userinfo_endpoint"),
registration_endpoint: get_str("registration_endpoint"),
revocation_endpoint: get_str("revocation_endpoint"),
introspection_endpoint: get_str("introspection_endpoint"),
response_types_supported: get_str_array("response_types_supported"),
response_modes_supported: get_str_array("response_modes_supported"),
grant_types_supported: get_str_array("grant_types_supported"),
token_endpoint_auth_methods_supported: get_str_array(
"token_endpoint_auth_methods_supported",
),
token_endpoint_auth_signing_alg_values_supported: get_str_array(
"token_endpoint_auth_signing_alg_values_supported",
),
revocation_endpoint_auth_signing_alg_values_supported: get_str_array(
"revocation_endpoint_auth_signing_alg_values_supported",
),
revocation_endpoint_auth_methods_supported: get_str_array(
"revocation_endpoint_auth_methods_supported",
),
introspection_endpoint_auth_methods_supported: get_str_array(
"introspection_endpoint_auth_methods_supported",
),
introspection_endpoint_auth_signing_alg_values_supported: get_str_array(
"introspection_endpoint_auth_signing_alg_values_supported",
),
code_challenge_methods_supported: get_str_array("code_challenge_methods_supported"),
service_documentation: get_str("service_documentation"),
resource: Some(protected_resource.into()),
authorization_servers: Some(vec![issuer]),
bearer_methods_supported: None,
resource_signing_alg_values_supported: None,
resource_name: None,
resource_documentation: None,
resource_policy_uri: None,
resource_tos_uri: None,
tls_client_certificate_bound_access_tokens: None,
authorization_details_types_supported: None,
dpop_signing_alg_values_supported: None,
dpop_bound_access_tokens_required: None,
})
}
fn parse_url_field<S>(
field_name: &str,
value: Option<S>,
base_url: Option<&Url>,
) -> Result<Url, McpSdkError>
where
S: Into<Cow<'a, str>>,
{
let value = value
.ok_or(McpSdkError::Internal {
description: format!("Error: '{field_name}' is missing."),
})?
.into();
let url = if value.contains("://") {
// Absolute URL
Url::parse(&value)
} else if let Some(base_url) = base_url {
// Relative URL, join with base_url
join_url(base_url, &value)
} else {
// No base_url provided, try to parse as absolute URL anyway
Url::parse(&value)
};
url.map_err(|e| McpSdkError::Internal {
description: format!("Error: '{field_name}' is not a valid URL: {e}"),
})
}
fn parse_optional_url_field<S>(
field_name: &str,
value: Option<S>,
base_url: Option<&Url>,
) -> Result<Option<Url>, McpSdkError>
where
S: Into<Cow<'a, str>>,
{
value
.map(|v| {
let value = v.into();
if value.contains("://") {
// Absolute URL
Url::parse(&value)
} else if let Some(base_url) = base_url {
// Relative URL, join with base_url
join_url(base_url, &value)
} else {
// No base_url provided, try to parse as absolute URL anyway
Url::parse(&value)
}
})
.transpose()
.map_err(|e| McpSdkError::Internal {
description: format!("Error: '{field_name}' is not a valid URL: {e}"),
})
}
pub fn scopes_supported<S>(mut self, scopes: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.scopes_supported = Some(scopes.into_iter().map(|s| s.into()).collect());
self
}
// OAuthMetadata setters
pub fn issuer<S>(mut self, issuer: S) -> Self
where
S: Into<Cow<'a, str>>,
{
self.issuer = Some(issuer.into());
self
}
pub fn service_documentation<S>(mut self, url: S) -> Self
where
S: Into<Cow<'a, str>>,
{
self.service_documentation = Some(url.into());
self
}
pub fn authorization_endpoint<S>(mut self, url: S) -> Self
where
S: Into<Cow<'a, str>>,
{
self.authorization_endpoint = Some(url.into());
self
}
pub fn token_endpoint<S>(mut self, url: S) -> Self
where
S: Into<Cow<'a, str>>,
{
self.token_endpoint = Some(url.into());
self
}
pub fn response_types_supported<S>(mut self, types: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.response_types_supported = Some(types.into_iter().map(|s| s.into()).collect());
self
}
pub fn response_modes_supported<S>(mut self, modes: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.response_modes_supported = Some(modes.into_iter().map(|s| s.into()).collect());
self
}
pub fn registration_endpoint(mut self, url: &'a str) -> Self {
self.registration_endpoint = Some(url.into());
self
}
pub fn userinfo_endpoint(mut self, url: &'a str) -> Self {
self.userinfo_endpoint = Some(url.into());
self
}
pub fn grant_types_supported<S>(mut self, types: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.grant_types_supported = Some(types.into_iter().map(|s| s.into()).collect());
self
}
pub fn token_endpoint_auth_methods_supported<S>(mut self, methods: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.token_endpoint_auth_methods_supported =
Some(methods.into_iter().map(|s| s.into()).collect());
self
}
pub fn token_endpoint_auth_signing_alg_values_supported<S>(mut self, algs: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.token_endpoint_auth_signing_alg_values_supported =
Some(algs.into_iter().map(|s| s.into()).collect());
self
}
pub fn revocation_endpoint(mut self, url: &'a str) -> Self {
self.revocation_endpoint = Some(url.into());
self
}
pub fn revocation_endpoint_auth_methods_supported<S>(mut self, methods: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.revocation_endpoint_auth_methods_supported =
Some(methods.into_iter().map(|s| s.into()).collect());
self
}
pub fn revocation_endpoint_auth_signing_alg_values_supported<S>(mut self, algs: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.revocation_endpoint_auth_signing_alg_values_supported =
Some(algs.into_iter().map(|s| s.into()).collect());
self
}
pub fn introspection_endpoint(mut self, endpoint: &'a str) -> Self {
self.introspection_endpoint = Some(endpoint.into());
self
}
pub fn introspection_endpoint_auth_methods_supported<S>(mut self, methods: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.introspection_endpoint_auth_methods_supported =
Some(methods.into_iter().map(|s| s.into()).collect());
self
}
pub fn introspection_endpoint_auth_signing_alg_values_supported<S>(
mut self,
algs: Vec<String>,
) -> Self
where
S: Into<Cow<'a, str>>,
{
self.introspection_endpoint_auth_signing_alg_values_supported =
Some(algs.into_iter().map(|s| s.into()).collect());
self
}
pub fn code_challenge_methods_supported<S>(mut self, methods: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.code_challenge_methods_supported =
Some(methods.into_iter().map(|s| s.into()).collect());
self
}
// OAuthProtectedResourceMetadata setters
pub fn resource(mut self, url: &'a str) -> Self {
self.resource = Some(url.into());
self
}
pub fn authorization_servers(mut self, servers: Vec<&'a str>) -> Self {
self.authorization_servers = Some(servers.into_iter().map(|s| s.into()).collect());
self
}
pub fn reqquired_scopes<S>(mut self, scopes: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.required_scopes = Some(scopes.into_iter().map(|s| s.into()).collect());
self
}
pub fn resource_documentation<S>(mut self, doc: String) -> Self
where
S: Into<Cow<'a, str>>,
{
self.resource_documentation = Some(doc.into());
self
}
pub fn jwks_uri(mut self, url: &'a str) -> Self {
self.jwks_uri = Some(url.into());
self
}
pub fn bearer_methods_supported<S>(mut self, methods: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.bearer_methods_supported = Some(methods.into_iter().map(|s| s.into()).collect());
self
}
pub fn resource_signing_alg_values_supported<S>(mut self, algs: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.resource_signing_alg_values_supported =
Some(algs.into_iter().map(|s| s.into()).collect());
self
}
pub fn resource_name<S>(mut self, name: S) -> Self
where
S: Into<Cow<'a, str>>,
{
self.resource_name = Some(name.into());
self
}
pub fn resource_policy_uri(mut self, url: &'a str) -> Self {
self.resource_policy_uri = Some(url.into());
self
}
pub fn resource_tos_uri(mut self, url: &'a str) -> Self {
self.resource_tos_uri = Some(url.into());
self
}
pub fn tls_client_certificate_bound_access_tokens(mut self, value: bool) -> Self {
self.tls_client_certificate_bound_access_tokens = Some(value);
self
}
pub fn authorization_details_types_supported<S>(mut self, types: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.authorization_details_types_supported =
Some(types.into_iter().map(|s| s.into()).collect());
self
}
pub fn dpop_signing_alg_values_supported<S>(mut self, algs: Vec<S>) -> Self
where
S: Into<Cow<'a, str>>,
{
self.dpop_signing_alg_values_supported = Some(algs.into_iter().map(|s| s.into()).collect());
self
}
pub fn dpop_bound_access_tokens_required(mut self, value: bool) -> Self {
self.dpop_bound_access_tokens_required = Some(value);
self
}
// Build method to construct OauthMetadata
pub fn build(
self,
) -> Result<(AuthorizationServerMetadata, OauthProtectedResourceMetadata), McpSdkError> {
let issuer = Self::parse_url_field("issuer", self.issuer, None)?;
let authorization_endpoint = Self::parse_url_field(
"authorization_endpoint",
self.authorization_endpoint,
Some(&issuer),
)?;
let token_endpoint =
Self::parse_url_field("token_endpoint", self.token_endpoint, Some(&issuer))?;
let registration_endpoint = Self::parse_optional_url_field(
"registration_endpoint",
self.registration_endpoint,
Some(&issuer),
)?;
let revocation_endpoint = Self::parse_optional_url_field(
"revocation_endpoint",
self.revocation_endpoint,
Some(&issuer),
)?;
let introspection_endpoint = Self::parse_optional_url_field(
"introspection_endpoint",
self.introspection_endpoint,
Some(&issuer),
)?;
let service_documentation = Self::parse_optional_url_field(
"service_documentation",
self.service_documentation,
None,
)?;
let jwks_uri = Self::parse_optional_url_field("jwks_uri", self.jwks_uri, Some(&issuer))?;
let authorization_server_metadata = AuthorizationServerMetadata {
issuer,
authorization_endpoint,
token_endpoint,
registration_endpoint,
service_documentation,
revocation_endpoint,
introspection_endpoint,
userinfo_endpoint: self.userinfo_endpoint.map(|v| v.into()),
response_types_supported: self
.response_types_supported
.unwrap_or_default()
.into_iter() // iterate over Cow<'a, str>
.map(|c| c.into_owned())
.collect(),
response_modes_supported: self
.response_modes_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
scopes_supported: self
.scopes_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
grant_types_supported: self
.grant_types_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
token_endpoint_auth_methods_supported: self
.token_endpoint_auth_methods_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
token_endpoint_auth_signing_alg_values_supported: self
.token_endpoint_auth_signing_alg_values_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
revocation_endpoint_auth_signing_alg_values_supported: self
.revocation_endpoint_auth_signing_alg_values_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
revocation_endpoint_auth_methods_supported: self
.revocation_endpoint_auth_methods_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
introspection_endpoint_auth_methods_supported: self
.introspection_endpoint_auth_methods_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
introspection_endpoint_auth_signing_alg_values_supported: self
.introspection_endpoint_auth_signing_alg_values_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
code_challenge_methods_supported: self
.code_challenge_methods_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
jwks_uri: jwks_uri.clone(),
};
let resource = Self::parse_url_field("resource", self.resource, None)?;
let resource_policy_uri =
Self::parse_optional_url_field("resource_policy_uri", self.resource_policy_uri, None)?;
let resource_tos_uri =
Self::parse_optional_url_field("resource_tos_uri", self.resource_tos_uri, None)?;
// Validate mandatory authorization_servers
let authorization_servers =
self.authorization_servers
.ok_or_else(|| McpSdkError::Internal {
description: "Error: 'authorization_servers' is missing".to_string(),
})?;
if authorization_servers.is_empty() {
return Err(McpSdkError::Internal {
description: "Error: 'authorization_servers' must contain at least one URL"
.to_string(),
});
}
let authorization_servers = authorization_servers
.iter()
.map(|url| {
Url::parse(url).map_err(|err| McpSdkError::Internal {
description: format!(
"Error: 'authorization_servers' contains invalid URL '{url}': {err}",
),
})
})
.collect::<Result<Vec<_>, _>>()?;
let protected_resource_metadata = OauthProtectedResourceMetadata {
resource,
authorization_servers,
jwks_uri,
scopes_supported: self
.required_scopes
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
bearer_methods_supported: self
.bearer_methods_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
resource_signing_alg_values_supported: self
.resource_signing_alg_values_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
resource_name: self.resource_name.map(|s| s.into()),
resource_documentation: self.resource_documentation.map(|s| s.into()),
resource_policy_uri,
resource_tos_uri,
tls_client_certificate_bound_access_tokens: self
.tls_client_certificate_bound_access_tokens,
authorization_details_types_supported: self
.authorization_details_types_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
dpop_signing_alg_values_supported: self
.dpop_signing_alg_values_supported
.map(|v| v.into_iter().map(|c| c.into_owned()).collect()),
dpop_bound_access_tokens_required: self.dpop_bound_access_tokens_required,
};
Ok((authorization_server_metadata, protected_resource_metadata))
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/spec.rs | crates/rust-mcp-sdk/src/auth/spec.rs | mod audience;
#[cfg(feature = "auth")]
mod claims;
#[cfg(feature = "auth")]
mod discovery;
#[cfg(feature = "auth")]
mod jwk;
pub use audience::*;
#[cfg(feature = "auth")]
pub use claims::*;
#[cfg(feature = "auth")]
pub use discovery::*;
#[cfg(feature = "auth")]
pub use jwk::*;
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/spec/discovery.rs | crates/rust-mcp-sdk/src/auth/spec/discovery.rs | use crate::{
auth::{OauthEndpoint, OAUTH_PROTECTED_RESOURCE_BASE, WELL_KNOWN_OAUTH_AUTHORIZATION_SERVER},
error::McpSdkError,
mcp_http::url_base,
};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use url::Url;
/// OAuth 2.0 Authorization Server Metadata (RFC 8414), as served from the
/// authorization server's `.well-known` discovery endpoint.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AuthorizationServerMetadata {
    /// The base URL of the authorization server (e.g., "http://localhost:8080/realms/master/").
    pub issuer: Url,
    /// URL to which the client redirects the user for authorization.
    pub authorization_endpoint: Url,
    /// URL to exchange authorization codes for tokens or refresh tokens.
    pub token_endpoint: Url,
    /// URL of the authorization server's JWK Set `JWK` document
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub jwks_uri: Option<Url>,
    /// Endpoint where clients can register dynamically.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub registration_endpoint: Option<Url>,
    /// List of supported OAuth scopes (e.g., "openid", "profile", "email", mcp:tools)
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub scopes_supported: Option<Vec<String>>,
    /// Response Types. Required by spec. If missing, default is empty vec.
    /// Examples: "code", "token", "id_token"
    // NOTE: an empty vec is omitted from serialization even though the spec
    // marks this field as required.
    #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")]
    pub response_types_supported: Vec<String>,
    /// Response Modes. Indicates how the authorization response is returned.
    /// Examples: "query", "fragment", "form_post"
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub response_modes_supported: Option<Vec<String>>,
    // ui_locales_supported
    // op_policy_uri
    // op_tos_uri
    /// List of supported Grant Types
    /// Examples: "authorization_code", "client_credentials", "refresh_token"
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub grant_types_supported: Option<Vec<String>>,
    /// Methods like "client_secret_basic", "client_secret_post"
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub token_endpoint_auth_methods_supported: Option<Vec<String>>,
    /// Signing algorithms for client authentication (e.g., "RS256")
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub token_endpoint_auth_signing_alg_values_supported: Option<Vec<String>>,
    /// Link to human-readable docs for developers.
    /// <https://datatracker.ietf.org/doc/html/rfc8414>
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub service_documentation: Option<Url>,
    /// OAuth 2.0 Token Revocation endpoint.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub revocation_endpoint: Option<Url>,
    /// Similar to token endpoint, but for revocation-specific auth.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub revocation_endpoint_auth_signing_alg_values_supported: Option<Vec<String>>,
    /// Tells the client which authentication methods are supported when accessing the token revocation endpoint.
    /// These are standardized methods from RFC 6749 (OAuth 2.0)
    /// Common values: "client_secret_basic", "client_secret_post", "private_key_jwt"
    /// `client_secret_basic` – client credentials sent in HTTP Basic Auth.
    /// `client_secret_post` – client credentials sent in the POST body.
    /// `private_key_jwt` – client authenticates using a signed JWT.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub revocation_endpoint_auth_methods_supported: Option<Vec<String>>,
    /// URL to validate tokens and get their metadata.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub introspection_endpoint: Option<Url>,
    /// Auth methods for accessing introspection.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub introspection_endpoint_auth_methods_supported: Option<Vec<String>>,
    /// Algorithms for accessing introspection.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub introspection_endpoint_auth_signing_alg_values_supported: Option<Vec<String>>,
    /// Methods supported for PKCE (Proof Key for Code Exchange).
    /// Common values: "plain", "S256"
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub code_challenge_methods_supported: Option<Vec<String>>,
    /// OIDC UserInfo endpoint.
    // NOTE(review): unlike the other endpoints this is a plain String, not a
    // `Url` — presumably to tolerate non-URL values; confirm before changing.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub userinfo_endpoint: Option<String>,
}
impl AuthorizationServerMetadata {
    /// Creates a new `AuthorizationServerMetadata` instance with the minimal required fields.
    /// According to the OAuth 2.0 Authorization Server Metadata specification (RFC 8414),
    /// the following fields are **required** for a valid metadata document:
    /// - `issuer`
    /// - `authorization_endpoint`
    /// - `token_endpoint`
    ///
    /// All other fields are initialized with their default values (typically `None` or empty collections).
    ///
    /// # Errors
    /// Returns `url::ParseError` when any of the three arguments is not a valid URL.
    pub fn new(
        issuer: &str,
        authorization_endpoint: &str,
        token_endpoint: &str,
    ) -> Result<Self, url::ParseError> {
        let issuer = Url::parse(issuer)?;
        let authorization_endpoint = Url::parse(authorization_endpoint)?;
        let token_endpoint = Url::parse(token_endpoint)?;
        Ok(Self {
            issuer,
            authorization_endpoint,
            token_endpoint,
            jwks_uri: Default::default(),
            registration_endpoint: Default::default(),
            scopes_supported: Default::default(),
            response_types_supported: Default::default(),
            response_modes_supported: Default::default(),
            grant_types_supported: Default::default(),
            token_endpoint_auth_methods_supported: Default::default(),
            token_endpoint_auth_signing_alg_values_supported: Default::default(),
            service_documentation: Default::default(),
            revocation_endpoint: Default::default(),
            revocation_endpoint_auth_signing_alg_values_supported: Default::default(),
            revocation_endpoint_auth_methods_supported: Default::default(),
            introspection_endpoint: Default::default(),
            introspection_endpoint_auth_methods_supported: Default::default(),
            introspection_endpoint_auth_signing_alg_values_supported: Default::default(),
            code_challenge_methods_supported: Default::default(),
            userinfo_endpoint: Default::default(),
        })
    }
    /// Fetches authorization server metadata from a remote `.well-known/openid-configuration`
    /// or OAuth 2.0 Authorization Server Metadata endpoint.
    ///
    /// This performs an HTTP GET request and deserializes the response directly into
    /// `AuthorizationServerMetadata`. The endpoint must return a JSON document conforming
    /// to RFC 8414 (OAuth 2.0 Authorization Server Metadata) or OpenID Connect Discovery 1.0.
    ///
    /// # Errors
    /// Returns `McpSdkError::Internal` when the request fails, the server responds
    /// with a non-success status, or the body cannot be deserialized.
    pub async fn from_discovery_url(discovery_url: &str) -> Result<Self, McpSdkError> {
        let client = Client::new();
        let metadata = client
            .get(discovery_url)
            .send()
            .await
            .map_err(|err| McpSdkError::Internal {
                description: err.to_string(),
            })?
            // Surface non-2xx responses as a clear error instead of failing
            // later with a confusing JSON parse error (consistent with
            // `AuthMetadataBuilder::from_discovery_url`).
            .error_for_status()
            .map_err(|err| McpSdkError::Internal {
                description: err.to_string(),
            })?
            .json::<AuthorizationServerMetadata>()
            .await
            .map_err(|err| McpSdkError::Internal {
                description: err.to_string(),
            })?;
        Ok(metadata)
    }
}
/// Represents metadata about a protected resource in the OAuth 2.0 ecosystem.
/// It allows clients and authorization servers to discover how to interact with a protected resource (like an MCP endpoint),
/// including security requirements and supported features.
/// <https://datatracker.ietf.org/doc/rfc9728>
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct OauthProtectedResourceMetadata {
    /// The base identifier of the protected resource (e.g., an MCP server's URI).
    /// This is the only required field.
    pub resource: Url,
    /// List of authorization servers that can issue access tokens for this resource.
    /// Allows dynamic trust discovery.
    // An empty list is omitted from the serialized document.
    #[serde(default, skip_serializing_if = "::std::vec::Vec::is_empty")]
    pub authorization_servers: Vec<Url>,
    /// URL where the resource exposes its public keys (JWKS) to verify signed tokens.
    /// Typically used to verify JWT access tokens.
    /// Example: `https://example.com/.well-known/jwks.json`
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub jwks_uri: Option<Url>,
    /// OAuth scopes the resource supports (e.g., "mcp:tool", "read", "write", "admin").
    /// Helps clients know what they can request for access.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub scopes_supported: Option<Vec<String>>,
    /// Methods accepted for presenting Bearer tokens:
    /// `authorization_header` (typical)
    /// `form_post`
    /// `uri_query`
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub bearer_methods_supported: Option<Vec<String>>,
    /// Supported signing algorithms for access tokens (if tokens are JWTs).
    /// Example: ["RS256", "ES256"]
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub resource_signing_alg_values_supported: Option<Vec<String>>,
    /// A human-readable name for the resource.
    /// Useful for UIs, logs, or developer documentation.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub resource_name: Option<String>,
    /// URL to developer docs describing the resource and how to use it.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub resource_documentation: Option<String>,
    /// URL to the resource's access policy or terms (e.g., rules on who can access what).
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub resource_policy_uri: Option<Url>,
    /// URL to terms of service applicable to this resource.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub resource_tos_uri: Option<Url>,
    /// If true, access tokens must be bound to a client TLS certificate.
    /// Used in mutual TLS scenarios for additional security.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub tls_client_certificate_bound_access_tokens: Option<bool>,
    /// Lists structured authorization types supported (used with Rich Authorization Requests (RAR))
    /// Example: ["payment_initiation", "account_information"]
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub authorization_details_types_supported: Option<Vec<String>>,
    /// Supported algorithms for DPoP (Demonstration of Proof-of-Possession) tokens.
    /// Example: ["ES256", "RS256"]
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub dpop_signing_alg_values_supported: Option<Vec<String>>,
    /// If true, the resource requires access tokens to be DPoP-bound.
    /// Enhances security by tying tokens to a specific client and key.
    #[serde(default, skip_serializing_if = "::std::option::Option::is_none")]
    pub dpop_bound_access_tokens_required: Option<bool>,
}
impl OauthProtectedResourceMetadata {
    /// Builds metadata with only the mandatory fields populated.
    ///
    /// `resource` and every entry of `authorization_servers` must be valid URLs;
    /// every other field starts at its default and can be filled in afterwards
    /// by the caller (or by constructing the struct directly).
    ///
    /// # Errors
    /// Returns `url::ParseError` for the first value that fails to parse.
    pub fn new<S>(
        resource: S,
        authorization_servers: Vec<S>,
        scopes_supported: Option<Vec<String>>,
    ) -> Result<Self, url::ParseError>
    where
        S: AsRef<str>,
    {
        // Parse `resource` first so its error takes precedence.
        let resource = Url::parse(resource.as_ref())?;
        let mut parsed_servers = Vec::with_capacity(authorization_servers.len());
        for candidate in &authorization_servers {
            parsed_servers.push(Url::parse(candidate.as_ref())?);
        }
        Ok(Self {
            resource,
            authorization_servers: parsed_servers,
            scopes_supported,
            jwks_uri: Default::default(),
            bearer_methods_supported: Default::default(),
            resource_signing_alg_values_supported: Default::default(),
            resource_name: Default::default(),
            resource_documentation: Default::default(),
            resource_policy_uri: Default::default(),
            resource_tos_uri: Default::default(),
            tls_client_certificate_bound_access_tokens: Default::default(),
            authorization_details_types_supported: Default::default(),
            dpop_signing_alg_values_supported: Default::default(),
            dpop_bound_access_tokens_required: Default::default(),
        })
    }
}
/// Returns the `.well-known` protected-resource metadata path for `path`.
/// The root path ("/") contributes no suffix.
pub fn create_protected_resource_metadata_url(path: &str) -> String {
    let suffix = match path {
        "/" => "",
        other => other,
    };
    format!("{OAUTH_PROTECTED_RESOURCE_BASE}{suffix}")
}
/// Derives the OAuth discovery endpoints for `mcp_server_url`.
///
/// Returns the map of well-known relative paths to their endpoint kinds,
/// together with the absolute URL of the protected-resource metadata document.
///
/// # Errors
/// Returns `McpSdkError::Internal` when `mcp_server_url` is not a valid URL.
pub fn create_discovery_endpoints(
    mcp_server_url: &str,
) -> Result<(HashMap<String, OauthEndpoint>, String), McpSdkError> {
    // Validate the server URL up front; everything else is derived from it.
    let resource_url = Url::parse(mcp_server_url).map_err(|err| McpSdkError::Internal {
        description: err.to_string(),
    })?;
    let relative_url = create_protected_resource_metadata_url(resource_url.path());
    let base_url = url_base(&resource_url);
    let protected_resource_metadata_url =
        format!("{}{relative_url}", base_url.trim_end_matches('/'));
    let mut endpoint_map = HashMap::new();
    endpoint_map.insert(
        WELL_KNOWN_OAUTH_AUTHORIZATION_SERVER.to_string(),
        OauthEndpoint::AuthorizationServerMetadata,
    );
    endpoint_map.insert(relative_url, OauthEndpoint::ProtectedResourceMetadata);
    Ok((endpoint_map, protected_resource_metadata_url))
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/spec/jwk.rs | crates/rust-mcp-sdk/src/auth/spec/jwk.rs | use crate::auth::{Audience, AuthClaims, AuthenticationError};
use http::StatusCode;
use jsonwebtoken::{decode, decode_header, jwk::Jwk, DecodingKey, TokenData, Validation};
use serde::{Deserialize, Serialize};
/// A JSON Web Key Set (JWKS) containing a list of JSON Web Keys,
/// as fetched from an authorization server's `jwks_uri`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct JsonWebKeySet {
    /// List of JSON Web Keys.
    pub keys: Vec<Jwk>,
}
/// Decodes the JOSE header of a JWT without verifying its signature.
///
/// Fails with `TokenVerificationFailed` (HTTP 401) when the token is malformed.
pub fn decode_token_header(token: &str) -> Result<jsonwebtoken::Header, AuthenticationError> {
    decode_header(token).map_err(|err| AuthenticationError::TokenVerificationFailed {
        description: err.to_string(),
        status_code: Some(StatusCode::UNAUTHORIZED.as_u16()),
    })
}
impl JsonWebKeySet {
    /// Verifies `token` against this key set.
    ///
    /// The verification key is selected by the `kid` in the token header; the
    /// signature and expiry are then validated, along with the audience and/or
    /// issuer claims when `validate_audience` / `validate_issuer` are provided
    /// (claims that are not requested are not required or checked).
    ///
    /// # Errors
    /// Returns `InvalidToken` for malformed or expired tokens, a missing `kid`,
    /// or when no matching key exists; `TokenVerificationFailed` for all other
    /// verification failures.
    pub fn verify(
        &self,
        token: String,
        validate_audience: Option<&Audience>,
        validate_issuer: Option<&String>,
    ) -> Result<TokenData<AuthClaims>, AuthenticationError> {
        let header = decode_token_header(&token)?;
        let kid = header.kid.ok_or(AuthenticationError::InvalidToken {
            description: "Missing kid in token header",
        })?;
        // Compare as &str instead of `== Some(kid.clone())`, which cloned the
        // kid once per key in the set.
        let jwk = self
            .keys
            .iter()
            .find(|key| key.common.key_id.as_deref() == Some(kid.as_str()))
            .ok_or(AuthenticationError::InvalidToken {
                description: "No matching key found in JWKS",
            })?;
        let decoding_key = DecodingKey::from_jwk(jwk).map_err(|err| {
            AuthenticationError::TokenVerificationFailed {
                description: err.to_string(),
                status_code: None,
            }
        })?;
        let mut validation = Validation::new(header.alg);
        // Only claims that the caller asked to validate become required.
        let mut required_claims = vec![];
        if let Some(validate_audience) = validate_audience {
            let vec_audience = match validate_audience {
                Audience::Single(aud) => &vec![aud.to_owned()],
                Audience::Multiple(auds) => auds,
            };
            validation.set_audience(vec_audience);
            required_claims.push("aud");
        } else {
            validation.validate_aud = false;
        }
        if let Some(validate_issuer) = validate_issuer {
            validation.set_issuer(&[validate_issuer]);
            required_claims.push("iss");
        }
        if !required_claims.is_empty() {
            validation.set_required_spec_claims(&required_claims);
        }
        let token_data =
            decode::<AuthClaims>(token, &decoding_key, &validation).map_err(|err| {
                match err.kind() {
                    jsonwebtoken::errors::ErrorKind::InvalidToken => {
                        AuthenticationError::InvalidToken {
                            description: "Invalid token",
                        }
                    }
                    jsonwebtoken::errors::ErrorKind::ExpiredSignature => {
                        AuthenticationError::InvalidToken {
                            description: "Expired token",
                        }
                    }
                    _ => AuthenticationError::TokenVerificationFailed {
                        description: err.to_string(),
                        status_code: Some(StatusCode::BAD_REQUEST.as_u16()),
                    },
                }
            })?;
        Ok(token_data)
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/spec/audience.rs | crates/rust-mcp-sdk/src/auth/spec/audience.rs | use core::fmt;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
/// Represents the audience claim, which can be a single string or a list of strings.
#[derive(Debug, Clone)]
pub enum Audience {
    /// A single audience value (serialized as a bare JSON string).
    Single(String),
    /// Multiple audience values (serialized as a JSON array of strings).
    Multiple(Vec<String>),
}
impl Audience {
    /// Normalizes the audience into an owned `Vec<String>`, regardless of variant.
    pub fn to_vec(&self) -> Vec<String> {
        match self {
            Audience::Single(single) => vec![single.clone()],
            Audience::Multiple(many) => many.to_vec(),
        }
    }
}
impl fmt::Display for Audience {
    /// Formats a single audience as the bare value, and multiple audiences as
    /// a comma-separated list.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Audience::Single(single) => f.write_str(single),
            Audience::Multiple(many) => f.write_str(&many.join(", ")),
        }
    }
}
impl PartialEq for Audience {
    // Equality is defined on the normalized vector form, so a
    // `Single("a")` compares equal to a `Multiple(vec!["a"])`.
    // Each comparison allocates via `to_vec`.
    fn eq(&self, other: &Self) -> bool {
        self.to_vec() == other.to_vec()
    }
}
impl Eq for Audience {}
impl Serialize for Audience {
    /// Serializes a lone audience as a plain string and several audiences as a
    /// sequence, mirroring the two shapes `Deserialize` accepts.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            Audience::Single(ref value) => serializer.serialize_str(value),
            Audience::Multiple(ref values) => serializer.collect_seq(values),
        }
    }
}
impl<'de> Deserialize<'de> for Audience {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// Use a Value to handle both string and array cases
let value = Value::deserialize(deserializer)?;
match value {
Value::String(s) => Ok(Audience::Single(s)),
Value::Array(arr) => {
let strings = arr
.into_iter()
.map(|v| match v {
Value::String(s) => Ok(s),
_ => Err(serde::de::Error::custom(
"audience array must contain strings",
)),
})
.collect::<Result<Vec<String>, D::Error>>()?;
Ok(Audience::Multiple(strings))
}
_ => Err(serde::de::Error::custom(
"audience must be a string or an array of strings",
)),
}
}
}
// Ergonomic conversions so callers can build an `Audience` from plain string
// and vector types without naming the enum variants explicitly.
impl From<&str> for Audience {
    fn from(value: &str) -> Self {
        Audience::Single(value.to_owned())
    }
}

impl From<String> for Audience {
    fn from(value: String) -> Self {
        Audience::Single(value)
    }
}

impl From<Vec<String>> for Audience {
    fn from(values: Vec<String>) -> Self {
        Audience::Multiple(values)
    }
}

impl From<Vec<&str>> for Audience {
    fn from(values: Vec<&str>) -> Self {
        Audience::Multiple(values.into_iter().map(str::to_owned).collect())
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/spec/claims.rs | crates/rust-mcp-sdk/src/auth/spec/claims.rs | use super::Audience;
use serde::{Deserialize, Serialize};
/// Represents a structured address for the OIDC address claim.
///
/// Every component is optional; `None` fields are omitted from the serialized
/// JSON via `skip_serializing_if`.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Address {
    /// Full mailing address, formatted for display or use.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub formatted: Option<String>,
    /// Street address component (e.g., house number and street name).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub street_address: Option<String>,
    /// City or locality component.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub locality: Option<String>,
    /// State, province, or region component.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    /// ZIP or postal code component.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub postal_code: Option<String>,
    /// Country name component.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
}
/// Represents a combined set of JWT, OAuth 2.0, OIDC, and provider-specific claims.
///
/// Field groups below: registered JWT claims (RFC 7519), OAuth 2.0 access-token
/// claims (RFC 9068), OpenID Connect standard claims, and Microsoft Entra ID
/// provider-specific claims. Every field is optional and omitted from serialized
/// output when `None` (`skip_serializing_if`), so the same struct can represent
/// tokens from providers that populate very different claim sets.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AuthClaims {
    // Standard JWT Claims (RFC 7519)
    /// Issuer - Identifies the authorization server that issued the token (JWT: iss).
    #[serde(rename = "iss", skip_serializing_if = "Option::is_none")]
    pub issuer: Option<String>,
    /// Subject - Unique identifier for the user or client (JWT: sub).
    #[serde(rename = "sub", skip_serializing_if = "Option::is_none")]
    pub subject: Option<String>,
    /// Audience - Identifies the intended recipients, can be a string or array (JWT: aud).
    #[serde(rename = "aud", skip_serializing_if = "Option::is_none")]
    pub audience: Option<Audience>,
    /// Expiration Time - Unix timestamp when the token expires (JWT: exp).
    #[serde(rename = "exp", skip_serializing_if = "Option::is_none")]
    pub expiration: Option<i64>,
    /// Not Before - Unix timestamp when the token becomes valid (JWT: nbf).
    #[serde(rename = "nbf", skip_serializing_if = "Option::is_none")]
    pub not_before: Option<i64>,
    /// Issued At - Unix timestamp when the token was issued (JWT: iat).
    #[serde(rename = "iat", skip_serializing_if = "Option::is_none")]
    pub issued_at: Option<i64>,
    /// JWT ID - Unique identifier for the token to prevent reuse (JWT: jti).
    #[serde(rename = "jti", skip_serializing_if = "Option::is_none")]
    pub jwt_id: Option<String>,
    // OAuth 2.0 Access Token Claims (RFC 9068)
    /// Scope - Space-separated list of scopes authorized for the token.
    #[serde(rename = "scope", skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
    /// Client ID - ID of the OAuth client that obtained the token.
    #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    /// Confirmation - Provides key binding info (e.g., cnf.jkt for PoP tokens).
    #[serde(rename = "cnf", skip_serializing_if = "Option::is_none")]
    pub confirmation: Option<serde_json::Value>,
    /// Authentication Time - Unix timestamp when the user was authenticated.
    #[serde(rename = "auth_time", skip_serializing_if = "Option::is_none")]
    pub auth_time: Option<i64>,
    /// Authorized Party - The party to which the token was issued.
    #[serde(rename = "azp", skip_serializing_if = "Option::is_none")]
    pub authorized_party: Option<String>,
    /// Actor - Used for delegated authorization (on behalf of another party).
    #[serde(rename = "act", skip_serializing_if = "Option::is_none")]
    pub actor: Option<serde_json::Value>,
    /// Session ID - Links the token to a specific user session (for logout, etc.).
    #[serde(rename = "sid", skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    // OpenID Connect Standard Claims (OIDC Core 1.0)
    /// User's full name.
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// User's first name.
    #[serde(rename = "given_name", skip_serializing_if = "Option::is_none")]
    pub given_name: Option<String>,
    /// User's last name.
    #[serde(rename = "family_name", skip_serializing_if = "Option::is_none")]
    pub family_name: Option<String>,
    /// User's middle name.
    #[serde(rename = "middle_name", skip_serializing_if = "Option::is_none")]
    pub middle_name: Option<String>,
    /// Casual name of the user.
    #[serde(rename = "nickname", skip_serializing_if = "Option::is_none")]
    pub nickname: Option<String>,
    /// Preferred username (often login name).
    #[serde(rename = "preferred_username", skip_serializing_if = "Option::is_none")]
    pub preferred_username: Option<String>,
    /// URL of the user's profile page.
    #[serde(rename = "profile", skip_serializing_if = "Option::is_none")]
    pub profile: Option<String>,
    /// URL of the user's profile picture.
    #[serde(rename = "picture", skip_serializing_if = "Option::is_none")]
    pub picture: Option<String>,
    /// URL of the user's website.
    #[serde(rename = "website", skip_serializing_if = "Option::is_none")]
    pub website: Option<String>,
    /// User's email address.
    #[serde(rename = "email", skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    /// Whether the email has been verified.
    #[serde(rename = "email_verified", skip_serializing_if = "Option::is_none")]
    pub email_verified: Option<bool>,
    /// User's gender.
    #[serde(rename = "gender", skip_serializing_if = "Option::is_none")]
    pub gender: Option<String>,
    /// User's date of birth (e.g., "YYYY-MM-DD").
    #[serde(rename = "birthdate", skip_serializing_if = "Option::is_none")]
    pub birthdate: Option<String>,
    /// User's time zone (e.g., "America/New_York").
    #[serde(rename = "zoneinfo", skip_serializing_if = "Option::is_none")]
    pub zoneinfo: Option<String>,
    /// User's locale (e.g., "en-US").
    #[serde(rename = "locale", skip_serializing_if = "Option::is_none")]
    pub locale: Option<String>,
    /// User's phone number.
    #[serde(rename = "phone_number", skip_serializing_if = "Option::is_none")]
    pub phone_number: Option<String>,
    /// Whether the phone number has been verified.
    #[serde(
        rename = "phone_number_verified",
        skip_serializing_if = "Option::is_none"
    )]
    pub phone_number_verified: Option<bool>,
    /// User's structured address.
    #[serde(rename = "address", skip_serializing_if = "Option::is_none")]
    pub address: Option<Address>,
    /// Last time the user's information was updated (Unix timestamp).
    #[serde(rename = "updated_at", skip_serializing_if = "Option::is_none")]
    pub updated_at: Option<i64>,
    // Microsoft Entra ID (Azure AD) Provider-Specific Claims
    /// Object ID of the user or service principal (Entra ID).
    #[serde(rename = "oid", skip_serializing_if = "Option::is_none")]
    pub object_id: Option<String>,
    /// Tenant ID (directory ID) (Entra ID).
    #[serde(rename = "tid", skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    /// User Principal Name (login, e.g., user@domain) (Entra ID).
    #[serde(rename = "upn", skip_serializing_if = "Option::is_none")]
    pub user_principal_name: Option<String>,
    /// Assigned roles (Entra ID).
    #[serde(rename = "roles", skip_serializing_if = "Option::is_none")]
    pub roles: Option<Vec<String>>,
    /// Azure AD groups (GUIDs) (Entra ID).
    #[serde(rename = "groups", skip_serializing_if = "Option::is_none")]
    pub groups: Option<Vec<String>>,
    /// Application ID (same as client_id) (Entra ID).
    #[serde(rename = "appid", skip_serializing_if = "Option::is_none")]
    pub application_id: Option<String>,
    /// Unique name (e.g., user@domain) (Entra ID).
    #[serde(rename = "unique_name", skip_serializing_if = "Option::is_none")]
    pub unique_name: Option<String>,
    /// Token version (e.g., "1.0" or "2.0") (Entra ID).
    #[serde(rename = "ver", skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
/// Represents an OAuth 2.0 Token Introspection response as per RFC 7662.
///
/// This struct captures the response from an OAuth 2.0 introspection endpoint,
/// providing details about the validity and metadata of an access or refresh token.
/// All fields are optional except `active`, as per the specification, to handle
/// cases where the token is inactive or certain metadata is not provided.
/// `None` fields are omitted during serialization (rather than emitted as
/// explicit `null` members), consistently across all optional fields.
///
/// # Example JSON
/// ```json
/// {
///   "active": true,
///   "scope": "read write",
///   "client_id": "client123",
///   "username": "john_doe",
///   "token_type": "access_token",
///   "exp": 1697054400,
///   "iat": 1697050800,
///   "nbf": 1697050800,
///   "sub": "user123",
///   "aud": ["resource_server_1", "resource_server_2"],
///   "iss": "https://auth.example.com",
///   "jti": "abc123"
/// }
/// ```
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "snake_case")]
pub struct IntrospectionResponse {
    /// Indicates whether the token is active (valid, not expired, etc.).
    /// This field is required by the OAuth 2.0 introspection specification.
    pub active: bool,
    /// Space-separated list of scopes granted to the token.
    /// Optional, as the token may have no scopes or be inactive.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
    /// Identifier of the client that requested the token.
    /// Optional, as it may not be provided for inactive tokens.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    /// Username of the resource owner associated with the token, if applicable.
    /// Optional, as it may not apply to all token types or be absent for inactive tokens.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    /// Type of the token, typically "access_token" or "refresh_token".
    /// Optional, as it may not be provided for inactive tokens.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub token_type: Option<String>,
    /// Expiration Time - Unix timestamp when the token expires (JWT: exp).
    #[serde(rename = "exp", skip_serializing_if = "Option::is_none")]
    pub expiration: Option<i64>,
    /// Issued At - Unix timestamp when the token was issued (JWT: iat).
    #[serde(rename = "iat", skip_serializing_if = "Option::is_none")]
    pub issued_at: Option<i64>,
    /// Not Before - Unix timestamp when the token becomes valid (JWT: nbf).
    #[serde(rename = "nbf", skip_serializing_if = "Option::is_none")]
    pub not_before: Option<i64>,
    /// Subject identifier, often the user ID associated with the token.
    /// Optional, as it may not be provided for inactive tokens.
    #[serde(rename = "sub", skip_serializing_if = "Option::is_none")]
    pub subject: Option<String>,
    /// Audience(s) the token is intended for, which can be a single string or an array of strings.
    /// Optional, as it may not be provided for inactive tokens.
    #[serde(rename = "aud", skip_serializing_if = "Option::is_none")]
    pub audience: Option<Audience>,
    /// Issuer identifier, typically the URI of the authorization server.
    /// Optional, as it may not be provided for inactive tokens.
    #[serde(rename = "iss", skip_serializing_if = "Option::is_none")]
    pub issuer: Option<String>,
    /// JWT ID - Unique identifier for the token to prevent reuse (JWT: jti).
    #[serde(rename = "jti", skip_serializing_if = "Option::is_none")]
    pub jwt_id: Option<String>,
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/auth/auth_provider/remote_auth_provider.rs | crates/rust-mcp-sdk/src/auth/auth_provider/remote_auth_provider.rs | use crate::{
auth::{
create_protected_resource_metadata_url, AuthInfo, AuthProvider, AuthenticationError,
AuthorizationServerMetadata, OauthEndpoint, OauthProtectedResourceMetadata,
OauthTokenVerifier, WELL_KNOWN_OAUTH_AUTHORIZATION_SERVER,
},
mcp_http::{
middleware::CorsMiddleware, url_base, GenericBody, GenericBodyExt, McpAppState, Middleware,
},
mcp_server::error::{TransportServerError, TransportServerResult},
};
use async_trait::async_trait;
use bytes::Bytes;
use http::{header::CONTENT_TYPE, StatusCode};
use http_body_util::{BodyExt, Full};
use reqwest::Client;
use std::{collections::HashMap, sync::Arc};
/// Represents a **Remote OAuth authentication provider** integrated with the MCP server.
/// This struct defines how the MCP server interacts with an external identity provider
/// that supports **Dynamic Client Registration (DCR)**.
/// The [`RemoteAuthProvider`] enables enterprise-grade authentication by leveraging
/// external OAuth infrastructure, while maintaining secure token verification and
/// identity validation within the MCP server.
pub struct RemoteAuthProvider {
    // Metadata document describing the external authorization server.
    auth_server_meta: AuthorizationServerMetadata,
    // Metadata document describing this server as an OAuth protected resource.
    protected_resource_meta: OauthProtectedResourceMetadata,
    // Verifier used to validate incoming access tokens.
    token_verifier: Box<dyn OauthTokenVerifier>,
    // Maps well-known relative URLs to the OAuth endpoint they serve.
    endpoint_map: HashMap<String, OauthEndpoint>,
    // Scopes a token must carry to access the server, if any are required.
    required_scopes: Option<Vec<String>>,
    // Absolute URL of the protected-resource metadata document (precomputed in `new`).
    protected_resource_metadata_url: String,
}
impl RemoteAuthProvider {
    /// Creates a provider from already-resolved authorization-server and
    /// protected-resource metadata.
    ///
    /// Registers the two well-known metadata endpoints this provider serves and
    /// precomputes the absolute URL of the protected-resource metadata document
    /// from the configured resource URL.
    pub fn new(
        auth_server_meta: AuthorizationServerMetadata,
        protected_resource_meta: OauthProtectedResourceMetadata,
        token_verifier: Box<dyn OauthTokenVerifier>,
        required_scopes: Option<Vec<String>>,
    ) -> Self {
        let mut endpoint_map = HashMap::new();
        endpoint_map.insert(
            WELL_KNOWN_OAUTH_AUTHORIZATION_SERVER.to_string(),
            OauthEndpoint::AuthorizationServerMetadata,
        );
        let resource_url = &protected_resource_meta.resource;
        let relative_url = create_protected_resource_metadata_url(resource_url.path());
        let base_url = url_base(resource_url);
        // Trim the trailing slash so base + relative path does not double up.
        let protected_resource_metadata_url =
            format!("{}{relative_url}", base_url.trim_end_matches('/'));
        endpoint_map.insert(relative_url, OauthEndpoint::ProtectedResourceMetadata);
        Self {
            auth_server_meta,
            protected_resource_meta,
            token_verifier,
            endpoint_map,
            required_scopes,
            protected_resource_metadata_url,
        }
    }

    /// Builds a provider by fetching the authorization-server metadata document
    /// from `authorization_server_metadata_url` over HTTP.
    ///
    /// # Errors
    /// Returns any `reqwest` error raised while fetching or decoding the document.
    pub async fn with_remote_metadata_url(
        authorization_server_metadata_url: &str,
        protected_resource_meta: OauthProtectedResourceMetadata,
        token_verifier: Box<dyn OauthTokenVerifier>,
        required_scopes: Option<Vec<String>>,
    ) -> Result<Self, reqwest::Error> {
        let client = Client::new();
        let auth_server_meta = client
            .get(authorization_server_metadata_url)
            .send()
            .await?
            .json::<AuthorizationServerMetadata>()
            .await?;
        Ok(Self::new(
            auth_server_meta,
            protected_resource_meta,
            token_verifier,
            required_scopes,
        ))
    }

    /// Builds a `200 OK` response with `Content-Type: application/json` carrying
    /// `response_str`. Shared by both metadata endpoints, which previously held
    /// two byte-identical copies of this logic.
    fn json_ok_response(response_str: String) -> TransportServerResult<http::Response<GenericBody>> {
        let body = Full::new(Bytes::from(response_str))
            .map_err(|err| TransportServerError::HttpError(err.to_string()))
            .boxed();
        http::Response::builder()
            .status(StatusCode::OK)
            .header(CONTENT_TYPE, "application/json")
            .body(body)
            .map_err(|err| TransportServerError::HttpError(err.to_string()))
    }

    /// Serves the authorization-server metadata document as JSON.
    fn handle_authorization_server_metadata(
        response_str: String,
    ) -> TransportServerResult<http::Response<GenericBody>> {
        Self::json_ok_response(response_str)
    }

    /// Serves the protected-resource metadata document as JSON.
    fn handle_protected_resource_metadata(
        response_str: String,
    ) -> TransportServerResult<http::Response<GenericBody>> {
        Self::json_ok_response(response_str)
    }
}
#[async_trait]
impl AuthProvider for RemoteAuthProvider {
    /// Absolute URL of the protected-resource metadata document (precomputed in `new`).
    fn protected_resource_metadata_url(&self) -> Option<&str> {
        Some(self.protected_resource_metadata_url.as_str())
    }
    /// Delegates access-token verification to the configured token verifier.
    async fn verify_token(&self, access_token: String) -> Result<AuthInfo, AuthenticationError> {
        self.token_verifier.verify_token(access_token).await
    }
    /// Scopes a token must carry, if any were configured for this provider.
    fn required_scopes(&self) -> Option<&Vec<String>> {
        self.required_scopes.as_ref()
    }
    /// Serves the OAuth metadata endpoints registered in `endpoint_map`.
    ///
    /// Unknown paths receive `404`; disallowed methods are rejected before any
    /// payload work; both metadata endpoints are serialized up front and served
    /// as JSON behind the default CORS middleware.
    async fn handle_request(
        &self,
        request: http::Request<&str>,
        state: Arc<McpAppState>,
    ) -> Result<http::Response<GenericBody>, TransportServerError> {
        let Some(endpoint) = self.endpoint_type(&request) else {
            return http::Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(GenericBody::empty())
                .map_err(|err| TransportServerError::HttpError(err.to_string()));
        };
        // return early if method is not allowed
        if let Some(response) = self.validate_allowed_methods(endpoint, request.method()) {
            return Ok(response);
        }
        match endpoint {
            OauthEndpoint::AuthorizationServerMetadata => {
                // Serialize first so the 'static closure can own the payload.
                let json_payload = serde_json::to_string(&self.auth_server_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_authorization_server_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            OauthEndpoint::ProtectedResourceMetadata => {
                let json_payload = serde_json::to_string(&self.protected_resource_meta)
                    .map_err(|err| TransportServerError::HttpError(err.to_string()))?;
                let cors = &CorsMiddleware::default();
                cors.handle(
                    request,
                    state,
                    Box::new(move |_req, _state| {
                        Box::pin(
                            async move { Self::handle_protected_resource_metadata(json_payload) },
                        )
                    }),
                )
                .await
            }
            // Any other endpoint kind is not served by this provider.
            _ => Ok(GenericBody::create_404_response()),
        }
    }
    /// Exposes the endpoint routing table used to dispatch to `handle_request`.
    fn auth_endpoints(&self) -> Option<&HashMap<String, OauthEndpoint>> {
        Some(&self.endpoint_map)
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime.rs | crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime.rs | pub mod mcp_client_runtime;
pub mod mcp_client_runtime_core;
use crate::error::{McpSdkError, SdkResult};
use crate::id_generator::FastIdGenerator;
use crate::mcp_traits::{McpClient, McpClientHandler};
use crate::task_store::{ClientTaskStore, ServerTaskStore, TaskStatusPoller, TaskStatusUpdate};
use crate::utils::ensure_server_protocole_compatibility;
use crate::{
mcp_traits::{RequestIdGen, RequestIdGenNumeric},
schema::{
schema_utils::{
ClientMessage, ClientMessages, FromMessage, MessageFromClient, NotificationFromClient,
RequestFromClient, ServerMessage, ServerMessages,
},
InitializeRequestParams, InitializeResult, RequestId, RpcError,
},
};
use async_trait::async_trait;
use futures::future::{join_all, try_join_all};
use futures::StreamExt;
use rust_mcp_schema::schema_utils::ResultFromServer;
use rust_mcp_schema::{GetTaskParams, GetTaskPayloadParams};
#[cfg(feature = "streamable-http")]
use rust_mcp_transport::{ClientStreamableTransport, StreamableTransportOptions};
use rust_mcp_transport::{IoStream, SessionId, StreamId, TaskId, TransportDispatcher};
use std::{sync::Arc, time::Duration};
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::sync::{watch, Mutex};
/// Stream id assigned to the standalone SSE stream carrying
/// server-initiated messages (see `create_sse_stream`).
pub const DEFAULT_STREAM_ID: &str = "STANDALONE-STREAM";
// Define a type alias for the TransportDispatcher trait object
type TransportDispatcherType = dyn TransportDispatcher<
    ServerMessages,
    MessageFromClient,
    ServerMessage,
    ClientMessages,
    ClientMessage,
>;
// Shared, reference-counted handle to the dispatcher trait object.
type TransportType = Arc<TransportDispatcherType>;
/// Options used to construct an MCP client runtime.
pub struct McpClientOptions<T>
where
    T: TransportDispatcher<
        ServerMessages,
        MessageFromClient,
        ServerMessage,
        ClientMessages,
        ClientMessage,
    >,
{
    // Client identity/capabilities sent with the `initialize` request.
    pub client_details: InitializeRequestParams,
    // Transport used to exchange messages with the server.
    pub transport: T,
    // Application handler invoked for server requests, notifications, and errors.
    pub handler: Box<dyn McpClientHandler>,
    // Optional store for client-side task state.
    pub task_store: Option<Arc<ClientTaskStore>>,
    // Optional store for tracking server-side task results.
    pub server_task_store: Option<Arc<ServerTaskStore>>,
}
pub struct ClientRuntime {
    // Thread-safe slot holding the active transport, if any (despite the name,
    // it holds at most one transport, not a map)
    transport_map: tokio::sync::RwLock<Option<TransportType>>,
    // The handler for processing MCP messages
    handler: Box<dyn McpClientHandler>,
    // Information about the server
    client_details: InitializeRequestParams,
    // Join handles of spawned reader tasks (message loop, stderr drain)
    handlers: Mutex<Vec<tokio::task::JoinHandle<Result<(), McpSdkError>>>>,
    // Generator for unique request IDs
    request_id_gen: Box<dyn RequestIdGen>,
    // Generator for stream IDs
    stream_id_gen: FastIdGenerator,
    #[cfg(feature = "streamable-http")]
    // Optional configuration for streamable transport
    transport_options: Option<StreamableTransportOptions>,
    // Flag indicating whether the client has been shut down
    is_shut_down: Mutex<bool>,
    // Session ID
    session_id: tokio::sync::RwLock<Option<SessionId>>,
    // Details about the connected server (watch channel so waiters can observe
    // the value once the initialize handshake completes)
    server_details_tx: watch::Sender<Option<InitializeResult>>,
    server_details_rx: watch::Receiver<Option<InitializeResult>>,
    task_store: Option<Arc<ClientTaskStore>>,
    server_task_store: Option<Arc<ServerTaskStore>>,
}
impl ClientRuntime {
    /// Creates a runtime with an already-constructed transport.
    /// The transport is stored immediately; no transport options are kept.
    pub(crate) fn new(
        client_details: InitializeRequestParams,
        transport: TransportType,
        handler: Box<dyn McpClientHandler>,
        task_store: Option<Arc<ClientTaskStore>>,
        server_task_store: Option<Arc<ServerTaskStore>>,
    ) -> Self {
        let (server_details_tx, server_details_rx) =
            watch::channel::<Option<InitializeResult>>(None);
        Self {
            transport_map: tokio::sync::RwLock::new(Some(transport)),
            handler,
            client_details,
            handlers: Mutex::new(vec![]),
            request_id_gen: Box::new(RequestIdGenNumeric::new(None)),
            #[cfg(feature = "streamable-http")]
            transport_options: None,
            is_shut_down: Mutex::new(false),
            session_id: tokio::sync::RwLock::new(None),
            stream_id_gen: FastIdGenerator::new(Some("s_")),
            server_details_tx,
            server_details_rx,
            task_store,
            server_task_store,
        }
    }
#[cfg(feature = "streamable-http")]
pub(crate) fn new_instance(
client_details: InitializeRequestParams,
transport_options: StreamableTransportOptions,
handler: Box<dyn McpClientHandler>,
task_store: Option<Arc<ClientTaskStore>>,
server_task_store: Option<Arc<ServerTaskStore>>,
) -> Self {
let (server_details_tx, server_details_rx) =
watch::channel::<Option<InitializeResult>>(None);
Self {
transport_map: tokio::sync::RwLock::new(None),
handler,
client_details,
handlers: Mutex::new(vec![]),
transport_options: Some(transport_options),
is_shut_down: Mutex::new(false),
session_id: tokio::sync::RwLock::new(None),
request_id_gen: Box::new(RequestIdGenNumeric::new(None)),
stream_id_gen: FastIdGenerator::new(Some("s_")),
server_details_tx,
server_details_rx,
task_store,
server_task_store,
}
}
    /// Performs the MCP `initialize` handshake with the server.
    ///
    /// Sends the `initialize` request, validates protocol-version compatibility,
    /// stores the server's details, optionally opens the standalone SSE stream
    /// (streamable-http only; a failure is only logged), and finally sends the
    /// `initialized` notification.
    async fn initialize_request(self: Arc<Self>) -> SdkResult<()> {
        let result: ResultFromServer = self
            .request(
                RequestFromClient::InitializeRequest(self.client_details.clone()),
                None,
            )
            .await?;
        if let ResultFromServer::InitializeResult(initialize_result) = result {
            ensure_server_protocole_compatibility(
                &self.client_details.protocol_version,
                &initialize_result.protocol_version,
            )?;
            // store server details
            self.set_server_details(initialize_result)?;
            #[cfg(feature = "streamable-http")]
            // try to create a sse stream for server initiated messages , if supported by the server
            if let Err(error) = self.clone().create_sse_stream().await {
                tracing::warn!("{error}");
            }
            // send a InitializedNotification to the server
            self.send_notification(NotificationFromClient::InitializedNotification(None))
                .await?;
        } else {
            // Any other result type means the server violated the handshake contract.
            return Err(RpcError::invalid_params()
                .with_message("Incorrect response to InitializeRequest!")
                .into());
        }
        Ok(())
    }
    /// Dispatches one incoming server message; returns the client's reply, if any.
    ///
    /// - Requests are forwarded to the handler; its result (or error) becomes the
    ///   response message returned to the caller for sending.
    /// - Notifications are forwarded to the handler; nothing is returned.
    /// - Errors and responses are routed to the pending request awaiting them via
    ///   the transport's per-request channel; unmatched ones are only logged.
    pub(crate) async fn handle_message(
        &self,
        message: ServerMessage,
        transport: &TransportType,
    ) -> SdkResult<Option<ClientMessage>> {
        let response = match message {
            ServerMessage::Request(jsonrpc_request) => {
                let request_id = jsonrpc_request.request_id().clone();
                let result = self.handler.handle_request(jsonrpc_request, self).await;
                // create a response to send back to the server
                let response: MessageFromClient = match result {
                    Ok(success_value) => success_value.into(),
                    Err(error_value) => MessageFromClient::Error(error_value),
                };
                let mcp_message = ClientMessage::from_message(response, Some(request_id))?;
                Some(mcp_message)
            }
            ServerMessage::Notification(jsonrpc_notification) => {
                self.handler
                    .handle_notification(jsonrpc_notification.into(), self)
                    .await?;
                None
            }
            ServerMessage::Error(jsonrpc_error) => {
                // Surface the error to the handler first, then try to unblock the
                // pending request (if the error carries a request id).
                self.handler
                    .handle_error(&jsonrpc_error.error, self)
                    .await?;
                if let Some(request_id) = jsonrpc_error.id.as_ref() {
                    if let Some(tx_response) = transport.pending_request_tx(request_id).await {
                        tx_response
                            .send(ServerMessage::Error(jsonrpc_error))
                            .map_err(|e| RpcError::internal_error().with_message(e.to_string()))?;
                    } else {
                        tracing::warn!(
                            "Received an error response with no corresponding request: {:?}",
                            &request_id
                        );
                    }
                }
                None
            }
            ServerMessage::Response(response) => {
                if let Some(tx_response) = transport.pending_request_tx(&response.id).await {
                    tx_response
                        .send(ServerMessage::Response(response))
                        .map_err(|e| RpcError::internal_error().with_message(e.to_string()))?;
                } else {
                    tracing::warn!(
                        "Received a response with no corresponding request: {:?}",
                        &response.id
                    );
                }
                None
            }
        };
        Ok(response)
    }
    /// Starts the runtime over the already-stored transport (non-HTTP mode).
    ///
    /// Spawns one task draining the transport's error stream and one main task
    /// processing incoming server messages, then performs the `initialize`
    /// handshake. Both join handles are retained in `self.handlers`.
    async fn start_standalone(self: Arc<Self>) -> SdkResult<()> {
        let self_clone = self.clone();
        let transport_map = self_clone.transport_map.read().await;
        let transport = transport_map.as_ref().ok_or(
            RpcError::internal_error()
                .with_message("transport stream does not exists or is closed!".to_string()),
        )?;
        //TODO: improve the flow
        let mut stream = transport.start().await?;
        let transport_clone = transport.clone();
        let mut error_io_stream = transport.error_stream().write().await;
        let error_io_stream = error_io_stream.take();
        let self_clone = Arc::clone(&self);
        let self_clone_err = Arc::clone(&self);
        // task reading from the error stream
        let err_task = tokio::spawn(async move {
            let self_ref = &*self_clone_err;
            if let Some(IoStream::Readable(error_input)) = error_io_stream {
                let mut reader = BufReader::new(error_input).lines();
                loop {
                    // Race each line read against transport shutdown so the task exits promptly.
                    tokio::select! {
                        should_break = transport_clone.is_shut_down() =>{
                            if should_break {
                                break;
                            }
                        }
                        line = reader.next_line() =>{
                            match line {
                                Ok(Some(error_message)) => {
                                    self_ref
                                        .handler
                                        .handle_process_error(error_message, self_ref)
                                        .await?;
                                }
                                Ok(None) => {
                                    // end of input
                                    break;
                                }
                                Err(e) => {
                                    tracing::error!("Error reading from std_err: {e}");
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            Ok::<(), McpSdkError>(())
        });
        let transport = transport.clone();
        // main task reading from mcp_message stream
        let main_task = tokio::spawn(async move {
            while let Some(mcp_messages) = stream.next().await {
                let self_ref = &*self_clone;
                match mcp_messages {
                    ServerMessages::Single(server_message) => {
                        let result = self_ref.handle_message(server_message, &transport).await;
                        match result {
                            Ok(result) => {
                                if let Some(result) = result {
                                    transport
                                        .send_message(ClientMessages::Single(result), None)
                                        .await?;
                                }
                            }
                            Err(error) => {
                                tracing::error!("Error handling message : {}", error)
                            }
                        }
                    }
                    ServerMessages::Batch(server_messages) => {
                        // Handle batch members concurrently; reply with a batch of
                        // whatever responses they produced.
                        let handling_tasks: Vec<_> = server_messages
                            .into_iter()
                            .map(|server_message| {
                                self_ref.handle_message(server_message, &transport)
                            })
                            .collect();
                        let results: Vec<_> = try_join_all(handling_tasks).await?;
                        let results: Vec<_> = results.into_iter().flatten().collect();
                        if !results.is_empty() {
                            transport
                                .send_message(ClientMessages::Batch(results), None)
                                .await?;
                        }
                    }
                }
            }
            Ok::<(), McpSdkError>(())
        });
        // send initialize request to the MCP server
        self.clone().initialize_request().await?;
        let mut lock = self.handlers.lock().await;
        lock.push(main_task);
        lock.push(err_task);
        Ok(())
    }
pub(crate) async fn store_transport(
&self,
stream_id: &str,
transport: TransportType,
) -> SdkResult<()> {
let mut transport_map = self.transport_map.write().await;
tracing::trace!("save transport for stream id : {}", stream_id);
*transport_map = Some(transport);
Ok(())
}
#[cfg(feature = "streamable-http")]
pub(crate) async fn new_transport(
&self,
session_id: Option<SessionId>,
standalone: bool,
) -> SdkResult<
impl TransportDispatcher<
ServerMessages,
MessageFromClient,
ServerMessage,
ClientMessages,
ClientMessage,
>,
> {
use rust_mcp_schema::schema_utils::SdkError;
let options = self
.transport_options
.as_ref()
.ok_or(SdkError::connection_closed())?;
let transport = ClientStreamableTransport::new(options, session_id, standalone)?;
Ok(transport)
}
#[cfg(feature = "streamable-http")]
pub(crate) async fn create_sse_stream(self: Arc<Self>) -> SdkResult<()> {
let stream_id: StreamId = DEFAULT_STREAM_ID.into();
let session_id = self.session_id.read().await.clone();
let transport: Arc<
dyn TransportDispatcher<
ServerMessages,
MessageFromClient,
ServerMessage,
ClientMessages,
ClientMessage,
>,
> = Arc::new(self.new_transport(session_id, true).await?);
let mut stream = transport.start().await?;
self.store_transport(&stream_id, transport.clone()).await?;
let self_clone = Arc::clone(&self);
let main_task = tokio::spawn(async move {
loop {
if let Some(mcp_messages) = stream.next().await {
match mcp_messages {
ServerMessages::Single(server_message) => {
let result = self.handle_message(server_message, &transport).await?;
if let Some(result) = result {
transport
.send_message(ClientMessages::Single(result), None)
.await?;
}
}
ServerMessages::Batch(server_messages) => {
let handling_tasks: Vec<_> = server_messages
.into_iter()
.map(|server_message| {
self.handle_message(server_message, &transport)
})
.collect();
let results: Vec<_> = try_join_all(handling_tasks).await?;
let results: Vec<_> = results.into_iter().flatten().collect();
if !results.is_empty() {
transport
.send_message(ClientMessages::Batch(results), None)
.await?;
}
}
}
// close the stream after all messages are sent, unless it is a standalone stream
if !stream_id.eq(DEFAULT_STREAM_ID) {
return Ok::<_, McpSdkError>(());
}
} else {
// end of stream
return Ok::<_, McpSdkError>(());
}
}
});
let mut lock = self_clone.handlers.lock().await;
lock.push(main_task);
Ok(())
}
/// Opens a dedicated streamable-HTTP stream, sends `messages`, and — when the
/// outgoing traffic contains at least one request — keeps reading the response
/// stream until it is exhausted.
///
/// Returns the transport's response for the outgoing message(s), or `None`
/// for notification-only traffic.
#[cfg(feature = "streamable-http")]
pub(crate) async fn start_stream(
    &self,
    messages: ClientMessages,
    timeout: Option<Duration>,
) -> SdkResult<Option<ServerMessages>> {
    use futures::stream::{AbortHandle, Abortable};
    use rust_mcp_schema::schema_utils::McpMessage;
    use crate::IdGenerator;
    let stream_id: StreamId = self.stream_id_gen.generate();
    let session_id = self.session_id.read().await.clone();
    // Remember whether the session id assigned by the server still has to be captured.
    let no_session_id = session_id.is_none();
    // Only streams carrying at least one request expect inbound traffic.
    let has_request = match &messages {
        ClientMessages::Single(client_message) => client_message.is_request(),
        // `McpMessage` is already in scope from the `use` at the top of this
        // function (a duplicate inner import was removed here).
        ClientMessages::Batch(client_messages) => client_messages.iter().any(|m| m.is_request()),
    };
    let transport: Arc<
        dyn TransportDispatcher<
            ServerMessages,
            MessageFromClient,
            ServerMessage,
            ClientMessages,
            ClientMessage,
        >,
    > = Arc::new(self.new_transport(session_id, false).await?);
    let mut stream = transport.start().await?;
    // Sends the outgoing messages; on the very first exchange it also records
    // the session id handed out by the server.
    let send_task = async {
        let result = transport.send_message(messages, timeout).await?;
        if no_session_id {
            if let Some(request_id) = transport.session_id().await.clone() {
                let mut guard = self.session_id.write().await;
                *guard = Some(request_id)
            }
        }
        Ok::<_, McpSdkError>(result)
    };
    if !has_request {
        // No request => no server reply to wait for.
        return send_task.await;
    }
    let (abort_recv_handle, abort_recv_reg) = AbortHandle::new_pair();
    // Dispatches server-initiated messages arriving on this stream while the
    // request is in flight.
    let receive_task = async {
        loop {
            tokio::select! {
                Some(mcp_messages) = stream.next() => {
                    match mcp_messages {
                        ServerMessages::Single(server_message) => {
                            let result = self.handle_message(server_message, &transport).await?;
                            if let Some(result) = result {
                                transport.send_message(ClientMessages::Single(result), None).await?;
                            }
                        }
                        ServerMessages::Batch(server_messages) => {
                            let handling_tasks: Vec<_> = server_messages
                                .into_iter()
                                .map(|server_message| self.handle_message(server_message, &transport))
                                .collect();
                            let results: Vec<_> = try_join_all(handling_tasks).await?;
                            let results: Vec<_> = results.into_iter().flatten().collect();
                            if !results.is_empty() {
                                transport.send_message(ClientMessages::Batch(results), None).await?;
                            }
                        }
                    }
                    // close the stream after all messages are sent, unless it is a standalone stream
                    if !stream_id.eq(DEFAULT_STREAM_ID) {
                        return Ok::<_, McpSdkError>(());
                    }
                }
            }
        }
    };
    let receive_task = Abortable::new(receive_task, abort_recv_reg);
    // Pin the tasks so `select!` can poll them by mutable reference.
    tokio::pin!(send_task);
    tokio::pin!(receive_task);
    // Run both tasks with cancellation logic.
    let (send_res, _) = tokio::select! {
        res = &mut send_task => {
            // cancel the receive_task task, to cover the case where send_task returns with error
            abort_recv_handle.abort();
            (res, receive_task.await) // Wait for receive_task to finish (it should exit due to cancellation)
        }
        res = &mut receive_task => {
            (send_task.await, res)
        }
    };
    send_res
}
/// Queries the server for the current status of `task_id`; once the task has
/// reached a terminal state, fetches its payload and records the outcome in
/// `task_store` under the given session.
pub(crate) async fn poll_task_status(
    self: Arc<ClientRuntime>,
    task_id: TaskId,
    session_id: Option<SessionId>,
    task_store: Arc<ServerTaskStore>,
) -> SdkResult<TaskStatusUpdate> {
    let status_result = self
        .request_get_task(GetTaskParams {
            task_id: task_id.to_string(),
        })
        .await?;
    // Terminal tasks also carry a payload; persist it together with the status.
    if status_result.is_terminal() {
        let payload = self
            .request_get_task_payload(GetTaskPayloadParams {
                task_id: task_id.clone(),
            })
            .await?;
        task_store
            .store_task_result(
                task_id.as_str(),
                status_result.status,
                payload.into(),
                session_id.as_ref(),
            )
            .await;
    }
    Ok((status_result.status, status_result.poll_interval))
}
}
#[async_trait]
impl McpClient for ClientRuntime {
/// Sends a single message to the server and returns its response, if any.
///
/// With the `streamable-http` feature and HTTP transport options configured,
/// the message travels on a fresh HTTP stream; otherwise it is dispatched on
/// the long-lived transport stored in `transport_map`.
async fn send(
    &self,
    message: MessageFromClient,
    request_id: Option<RequestId>,
    request_timeout: Option<Duration>,
) -> SdkResult<Option<ServerMessage>> {
    #[cfg(feature = "streamable-http")]
    {
        if self.transport_options.is_some() {
            // Assign an outgoing id (reusing `request_id` when provided).
            let outgoing_request_id = self
                .request_id_gen
                .request_id_for_message(&message, request_id);
            let mcp_message = ClientMessage::from_message(message, outgoing_request_id)?;
            let response = self
                .start_stream(ClientMessages::Single(mcp_message), request_timeout)
                .await?;
            // `as_single` converts the reply; a mismatched message shape becomes an error.
            return response
                .map(|r| r.as_single())
                .transpose()
                .map_err(|err| err.into());
        }
    }
    // Non-HTTP path: use the stored long-lived transport.
    let transport_map = self.transport_map.read().await;
    let transport = transport_map.as_ref().ok_or(
        RpcError::internal_error()
            .with_message("transport stream does not exists or is closed!".to_string()),
    )?;
    let outgoing_request_id = self
        .request_id_gen
        .request_id_for_message(&message, request_id);
    let mcp_message = ClientMessage::from_message(message, outgoing_request_id)?;
    let response = transport
        .send_message(ClientMessages::Single(mcp_message), request_timeout)
        .await?;
    response
        .map(|r| r.as_single())
        .transpose()
        .map_err(|err| err.into())
}
/// Store for tasks this client executes on the server's behalf, if configured.
fn task_store(&self) -> Option<Arc<ClientTaskStore>> {
    self.task_store.as_ref().map(Arc::clone)
}
/// Store tracking tasks the server executes for this client, if configured.
fn server_task_store(&self) -> Option<Arc<ServerTaskStore>> {
    self.server_task_store.as_ref().map(Arc::clone)
}
/// Session id negotiated with the server, if one has been assigned.
async fn session_id(&self) -> Option<SessionId> {
    let guard = self.session_id.read().await;
    guard.clone()
}
/// Sends a batch of client messages and returns the matching batch of
/// responses, if any.
///
/// Uses a fresh HTTP stream when streamable-http transport options are
/// configured, otherwise the long-lived stored transport.
async fn send_batch(
    &self,
    messages: Vec<ClientMessage>,
    timeout: Option<Duration>,
) -> SdkResult<Option<Vec<ServerMessage>>> {
    #[cfg(feature = "streamable-http")]
    {
        if self.transport_options.is_some() {
            let result = self
                .start_stream(ClientMessages::Batch(messages), timeout)
                .await?;
            // `as_batch` converts the reply; a mismatched message shape becomes an error.
            return result
                .map(|r| r.as_batch())
                .transpose()
                .map_err(|err| err.into());
        }
    }
    let transport_map = self.transport_map.read().await;
    let transport = transport_map.as_ref().ok_or(
        RpcError::internal_error()
            .with_message("transport stream does not exists or is closed!".to_string()),
    )?;
    transport
        .send_batch(messages, timeout)
        .await
        .map_err(|err| err.into())
}
/// Starts the client: wires up task-status notifications and task polling,
/// then either performs the HTTP initialize handshake (streamable-http mode)
/// or enters the standalone message loop.
async fn start(self: Arc<Self>) -> SdkResult<()> {
    let runtime = self.clone();
    if let Some(task_store) = runtime.task_store() {
        // send TaskStatusNotification if task_store is present and supports subscribe()
        if let Some(mut stream) = task_store.subscribe() {
            tokio::spawn(async move {
                while let Some((params, _)) = stream.next().await {
                    let _ = runtime.notify_task_status(params).await;
                }
            });
        }
    }
    let runtime = self.clone();
    // Task polling for client initiated tasks
    if let Some(server_task_store) = runtime.server_task_store.clone() {
        let task_store_clone = server_task_store.clone();
        let runtime_clone = runtime.clone();
        // Poller handed to the task store; queries the server for status updates.
        let callback: TaskStatusPoller = Box::new(move |task_id, session_id| {
            let task_store_clone = server_task_store.clone();
            let runtime_clone = runtime_clone.clone();
            Box::pin(async move {
                runtime_clone
                    .poll_task_status(task_id, session_id, task_store_clone)
                    .await
            })
        });
        if let Err(error) = task_store_clone.start_task_polling(callback) {
            tracing::error!("Failed to start task polling: {error}");
        }
    }
    #[cfg(feature = "streamable-http")]
    {
        if self.transport_options.is_some() {
            // HTTP mode: only the initialize handshake runs here; streams are
            // opened per message by `send`/`send_batch`.
            self.initialize_request().await?;
            return Ok(());
        }
    }
    self.start_standalone().await
}
/// Stores the server's `InitializeResult` so other tasks can observe it
/// through the watch channel.
fn set_server_details(&self, server_details: InitializeResult) -> SdkResult<()> {
    match self.server_details_tx.send(Some(server_details)) {
        Ok(()) => Ok(()),
        Err(_) => Err(RpcError::internal_error()
            .with_message("Failed to set server details".to_string())
            .into()),
    }
}
/// Client name, version and declared capabilities.
fn client_info(&self) -> &InitializeRequestParams {
    &self.client_details
}
/// Server details received during initialization, once available.
fn server_info(&self) -> Option<InitializeResult> {
    let details = self.server_details_rx.borrow();
    details.clone()
}
/// Whether `shut_down` has been invoked on this client.
async fn is_shut_down(&self) -> bool {
    let result = self.is_shut_down.lock().await;
    *result
}
/// Shuts the client down: marks it as shut down, closes the transport and
/// waits for all background handler tasks to finish.
async fn shut_down(&self) -> SdkResult<()> {
    let mut is_shut_down_lock = self.is_shut_down.lock().await;
    *is_shut_down_lock = true;
    // Take the transport out of the map and release the write lock *before*
    // awaiting its shutdown, so other tasks are not blocked on the lock.
    let mut transport_map = self.transport_map.write().await;
    let transport_option = transport_map.take();
    drop(transport_map);
    if let Some(transport) = transport_option {
        let _ = transport.shut_down().await;
    }
    // wait for tasks
    let mut tasks_lock = self.handlers.lock().await;
    let join_handlers: Vec<_> = tasks_lock.drain(..).collect();
    join_all(join_handlers).await;
    Ok(())
}
/// Terminates the HTTP session (when running over streamable-http) and shuts
/// the client down.
async fn terminate_session(&self) {
    #[cfg(feature = "streamable-http")]
    {
        if let Some(transport_options) = self.transport_options.as_ref() {
            let session_id = self.session_id.read().await.clone();
            transport_options
                .terminate_session(session_id.as_ref())
                .await;
            let _ = self.shut_down().await;
            // Return here so `shut_down()` is not invoked a second time below
            // (the previous version fell through and shut down twice).
            return;
        }
    }
    let _ = self.shut_down().await;
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes/server_runtime.rs | crates/rust-mcp-sdk/src/mcp_runtimes/server_runtime.rs | pub mod mcp_server_runtime;
pub mod mcp_server_runtime_core;
use crate::auth::AuthInfo;
use crate::error::SdkResult;
use crate::mcp_traits::{McpServer, McpServerHandler, RequestIdGen, RequestIdGenNumeric};
use crate::schema::{
schema_utils::{
ClientMessage, ClientMessages, FromMessage, MessageFromServer, SdkError, ServerMessage,
ServerMessages,
},
InitializeRequestParams, InitializeResult, RequestId, RpcError,
};
use crate::task_store::{ClientTaskStore, ServerTaskStore, TaskStatusPoller, TaskStatusUpdate};
use crate::utils::AbortTaskOnDrop;
use async_trait::async_trait;
use futures::future::try_join_all;
use futures::{StreamExt, TryFutureExt};
use rust_mcp_schema::{GetTaskParams, GetTaskPayloadParams};
#[cfg(feature = "hyper-server")]
use rust_mcp_transport::SessionId;
use rust_mcp_transport::{IoStream, TaskId, TransportDispatcher};
use std::panic;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::AsyncWriteExt;
use tokio::sync::{mpsc, oneshot, watch, RwLock, RwLockReadGuard};
pub const DEFAULT_STREAM_ID: &str = "STANDALONE-STREAM";
const TASK_CHANNEL_CAPACITY: usize = 500;
/// Type alias for the boxed `TransportDispatcher` trait object the server
/// uses to exchange messages with a client.
type TransportType = Arc<
    dyn TransportDispatcher<
        ClientMessages,
        MessageFromServer,
        ClientMessage,
        ServerMessages,
        ServerMessage,
    >,
>;
/// Struct representing the runtime core of the MCP server, handling transport and client details
pub struct ServerRuntime {
    // The handler for processing MCP messages
    handler: Arc<dyn McpServerHandler>,
    // Information about the server
    server_details: Arc<InitializeResult>,
    // Session id assigned by the hyper server (set only for per-session runtimes)
    #[cfg(feature = "hyper-server")]
    session_id: Option<SessionId>,
    // Transport for the standalone (default) stream, when one is attached
    transport_map: tokio::sync::RwLock<Option<TransportType>>,
    // Generates ids for server-initiated requests
    request_id_gen: Box<dyn RequestIdGen>,
    // Watch channel broadcasting the client's InitializeRequestParams once known
    client_details_tx: watch::Sender<Option<InitializeRequestParams>>,
    client_details_rx: watch::Receiver<Option<InitializeRequestParams>>,
    // Authentication details of the current client, if any
    auth_info: tokio::sync::RwLock<Option<AuthInfo>>,
    // Store for tasks executed by this server
    task_store: Option<Arc<ServerTaskStore>>,
    // Store used when polling server-initiated tasks (see `new`)
    client_task_store: Option<Arc<ClientTaskStore>>,
}
/// Bundled configuration for constructing a `ServerRuntime`.
pub struct McpServerOptions<T>
where
    T: TransportDispatcher<
        ClientMessages,
        MessageFromServer,
        ClientMessage,
        ServerMessages,
        ServerMessage,
    >,
{
    /// Server name, version, capabilities and instructions.
    pub server_details: InitializeResult,
    /// Transport used to exchange messages with the client.
    pub transport: T,
    /// Application handler implementing the server behavior.
    pub handler: Arc<dyn McpServerHandler>,
    /// Optional store for tasks executed by the server.
    pub task_store: Option<Arc<ServerTaskStore>>,
    /// Optional store used for polling server-initiated tasks.
    pub client_task_store: Option<Arc<ClientTaskStore>>,
}
#[async_trait]
impl McpServer for ServerRuntime {
/// Store for tasks executed by this server, if configured.
fn task_store(&self) -> Option<Arc<ServerTaskStore>> {
    self.task_store.as_ref().map(Arc::clone)
}
/// Store used for polling server-initiated tasks, if configured.
fn client_task_store(&self) -> Option<Arc<ClientTaskStore>> {
    self.client_task_store.as_ref().map(Arc::clone)
}
/// Set the client details, storing them in client_details
async fn set_client_details(&self, client_details: InitializeRequestParams) -> SdkResult<()> {
    match self.client_details_tx.send(Some(client_details)) {
        Ok(()) => Ok(()),
        Err(_) => Err(RpcError::internal_error()
            .with_message("Failed to set client details".to_string())
            .into()),
    }
}
/// Replaces the stored auth info when the incoming token differs from the
/// current one, or when auth info appears/disappears.
async fn update_auth_info(&self, new_auth_info: Option<AuthInfo>) {
    // Decide and update under a single write lock so the comparison cannot
    // race with a concurrent writer (the previous read-then-write version
    // could act on a stale snapshot between the two lock acquisitions).
    let mut current = self.auth_info.write().await;
    let should_update = match (&*current, &new_auth_info) {
        (None, Some(_)) => true,
        (Some(old), Some(new)) => old.token_unique_id != new.token_unique_id,
        (Some(_), None) => true,
        (None, None) => false,
    };
    if should_update {
        *current = new_auth_info;
    }
}
/// Borrowed (read-guarded) view of the current auth info.
async fn auth_info(&self) -> RwLockReadGuard<'_, Option<AuthInfo>> {
    self.auth_info.read().await
}
/// Owned copy of the current auth info.
async fn auth_info_cloned(&self) -> Option<AuthInfo> {
    self.auth_info.read().await.clone()
}
/// Blocks until the client's `InitializeRequestParams` have been stored
/// (i.e. until initialization has completed).
async fn wait_for_initialization(&self) {
    loop {
        // Check first so we return immediately when already initialized.
        if self.client_details_rx.borrow().is_some() {
            return;
        }
        // Wait for the watch channel to signal a change, then re-check.
        let mut rx = self.client_details_rx.clone();
        rx.changed().await.ok();
    }
}
/// Sends a single server message to the client over the stored transport and
/// returns the client's response, if any.
async fn send(
    &self,
    message: MessageFromServer,
    request_id: Option<RequestId>,
    request_timeout: Option<Duration>,
) -> SdkResult<Option<ClientMessage>> {
    let guard = self.transport_map.read().await;
    let Some(transport) = guard.as_ref() else {
        return Err(RpcError::internal_error()
            .with_message("transport stream does not exists or is closed!".to_string())
            .into());
    };
    // Assign an outgoing request id (reusing `request_id` when provided).
    let id = self
        .request_id_gen
        .request_id_for_message(&message, request_id);
    let outgoing = ServerMessage::from_message(message, id)?;
    match transport
        .send_message(ServerMessages::Single(outgoing), request_timeout)
        .await?
    {
        Some(reply) => Ok(Some(reply.as_single()?)),
        None => Ok(None),
    }
}
/// Sends a batch of server messages to the client and returns the matching
/// batch of responses, if any.
async fn send_batch(
    &self,
    messages: Vec<ServerMessage>,
    request_timeout: Option<Duration>,
) -> SdkResult<Option<Vec<ClientMessage>>> {
    let guard = self.transport_map.read().await;
    let Some(transport) = guard.as_ref() else {
        return Err(RpcError::internal_error()
            .with_message("transport stream does not exists or is closed!".to_string())
            .into());
    };
    let outcome = transport.send_batch(messages, request_timeout).await;
    outcome.map_err(|err| err.into())
}
/// Returns the server's details, including server capability,
/// instructions, protocol_version, server_info and optional meta data.
fn server_info(&self) -> &InitializeResult {
    &self.server_details
}
/// Returns the client information if available (i.e. after successful
/// initialization), otherwise returns None.
fn client_info(&self) -> Option<InitializeRequestParams> {
    self.client_details_rx.borrow().clone()
}
/// Main runtime loop, processes incoming messages and handles requests
async fn start(self: Arc<Self>) -> SdkResult<()> {
let self_clone = self.clone();
let transport_map = self_clone.transport_map.read().await;
let transport = transport_map.as_ref().ok_or(
RpcError::internal_error()
.with_message("transport stream does not exists or is closed!".to_string()),
)?;
let mut stream = transport.start().await?;
// Create a channel to collect results from spawned tasks
let (tx, mut rx) = mpsc::channel(TASK_CHANNEL_CAPACITY);
// Process incoming messages from the client
while let Some(mcp_messages) = stream.next().await {
match mcp_messages {
ClientMessages::Single(client_message) => {
let transport = transport.clone();
let self = self.clone();
let tx = tx.clone();
// Handle incoming messages in a separate task to avoid blocking the stream.
tokio::spawn(async move {
let result = self.handle_message(client_message, &transport).await;
let send_result: SdkResult<_> = match result {
Ok(result) => {
if let Some(result) = result {
transport
.send_message(ServerMessages::Single(result), None)
.map_err(|e| e.into())
.await
} else {
Ok(None)
}
}
Err(error) => {
tracing::error!("Error handling message : {}", error);
Ok(None)
}
};
// Send result to the main loop
if let Err(error) = tx.send(send_result).await {
tracing::error!("Failed to send result to channel: {}", error);
}
});
}
ClientMessages::Batch(client_messages) => {
let transport = transport.clone();
let self = self_clone.clone();
let tx = tx.clone();
tokio::spawn(async move {
let handling_tasks: Vec<_> = client_messages
.into_iter()
.map(|client_message| self.handle_message(client_message, &transport))
.collect();
let send_result = match try_join_all(handling_tasks).await {
Ok(results) => {
let results: Vec<_> = results.into_iter().flatten().collect();
if !results.is_empty() {
transport
.send_message(ServerMessages::Batch(results), None)
.map_err(|e| e.into())
.await
} else {
Ok(None)
}
}
Err(error) => Err(error),
};
if let Err(error) = tx.send(send_result).await {
tracing::error!("Failed to send batch result to channel: {}", error);
}
});
}
}
// Check for results from spawned tasks to propagate errors
while let Ok(result) = rx.try_recv() {
result?; // Propagate errors
}
}
// Drop tx to close the channel and collect remaining results
drop(tx);
while let Some(result) = rx.recv().await {
result?; // Propagate errors
}
return Ok(());
}
/// Writes `message` (plus a trailing newline) to the transport's error
/// stream when that stream is writable; a missing or non-writable error
/// stream is silently ignored.
async fn stderr_message(&self, message: String) -> SdkResult<()> {
    let transport_map = self.transport_map.read().await;
    let transport = transport_map.as_ref().ok_or(
        RpcError::internal_error()
            .with_message("transport stream does not exists or is closed!".to_string()),
    )?;
    let mut lock = transport.error_stream().write().await;
    if let Some(IoStream::Writable(stderr)) = lock.as_mut() {
        stderr.write_all(message.as_bytes()).await?;
        stderr.write_all(b"\n").await?;
        stderr.flush().await?;
    }
    Ok(())
}
/// Session id assigned to this runtime by the hyper server, if any.
#[cfg(feature = "hyper-server")]
fn session_id(&self) -> Option<SessionId> {
    self.session_id.clone()
}
}
impl ServerRuntime {
/// Forwards a raw payload string to the active transport for processing.
pub(crate) async fn consume_payload_string(&self, payload: &str) -> SdkResult<()> {
    let guard = self.transport_map.read().await;
    let Some(transport) = guard.as_ref() else {
        return Err(RpcError::internal_error()
            .with_message("stream id does not exists or is closed!".to_string())
            .into());
    };
    transport.consume_string_payload(payload).await?;
    Ok(())
}
/// Dispatches one incoming client message to the handler and returns the
/// server's reply, when the message kind produces one (only requests do).
pub(crate) async fn handle_message(
    self: &Arc<Self>,
    message: ClientMessage,
    transport: &Arc<
        dyn TransportDispatcher<
            ClientMessages,
            MessageFromServer,
            ClientMessage,
            ServerMessages,
            ServerMessage,
        >,
    >,
) -> SdkResult<Option<ServerMessage>> {
    let response = match message {
        // Handle a client request
        ClientMessage::Request(client_jsonrpc_request) => {
            let request_id = client_jsonrpc_request.request_id().clone();
            let result = self
                .handler
                .handle_request(client_jsonrpc_request, self.clone())
                .await;
            // create a response to send back to the client
            let response: MessageFromServer = match result {
                Ok(success_value) => success_value.into(),
                Err(error_value) => {
                    // Error occurred during initialization.
                    // A likely cause could be an unsupported protocol version.
                    if !self.is_initialized() {
                        return Err(error_value.into());
                    }
                    // After initialization, errors are reported back to the client.
                    MessageFromServer::Error(error_value)
                }
            };
            let mpc_message: ServerMessage =
                ServerMessage::from_message(response, Some(request_id))?;
            Some(mpc_message)
        }
        // Notifications never produce a reply.
        ClientMessage::Notification(client_jsonrpc_notification) => {
            self.handler
                .handle_notification(client_jsonrpc_notification, self.clone())
                .await?;
            None
        }
        // An error response: forward it to the pending server-initiated
        // request it answers, if one is still waiting.
        ClientMessage::Error(jsonrpc_error) => {
            self.handler
                .handle_error(&jsonrpc_error.error, self.clone())
                .await?;
            if let Some(request_id) = jsonrpc_error.id.as_ref() {
                if let Some(tx_response) = transport.pending_request_tx(request_id).await {
                    tx_response
                        .send(ClientMessage::Error(jsonrpc_error))
                        .map_err(|e| RpcError::internal_error().with_message(e.to_string()))?;
                } else {
                    tracing::warn!(
                        "Received an error response with no corresponding request {:?}",
                        &jsonrpc_error.id
                    );
                }
            }
            None
        }
        // A success response: hand it to the pending server-initiated request.
        ClientMessage::Response(response) => {
            if let Some(tx_response) = transport.pending_request_tx(&response.id).await {
                tx_response
                    .send(ClientMessage::Response(response))
                    .map_err(|e| RpcError::internal_error().with_message(e.to_string()))?;
            } else {
                tracing::warn!(
                    "Received a response with no corresponding request: {:?}",
                    &response.id
                );
            }
            None
        }
    };
    Ok(response)
}
/// Registers `transport` as the standalone (default) stream transport.
/// Transports for any other stream id are not retained.
pub(crate) async fn store_transport(
    &self,
    stream_id: &str,
    transport: Arc<
        dyn TransportDispatcher<
            ClientMessages,
            MessageFromServer,
            ClientMessage,
            ServerMessages,
            ServerMessage,
        >,
    >,
) -> SdkResult<()> {
    if stream_id != DEFAULT_STREAM_ID {
        return Ok(());
    }
    let mut slot = self.transport_map.write().await;
    tracing::trace!("save transport for stream id : {}", stream_id);
    *slot = Some(transport);
    Ok(())
}
//TODO: re-visit and simplify unnecessary hashmap
/// Shuts down the standalone-stream transport. For any other stream id this
/// is a no-op (per-stream transports are not retained).
pub(crate) async fn remove_transport(&self, stream_id: &str) -> SdkResult<()> {
    if stream_id != DEFAULT_STREAM_ID {
        return Ok(());
    }
    // NOTE(review): only a read lock is taken and the entry is left in the
    // map; the transport is shut down but not removed — confirm this is
    // intentional (see TODO above).
    let transport_map = self.transport_map.read().await;
    tracing::trace!("removing transport for stream id : {}", stream_id);
    if let Some(transport) = transport_map.as_ref() {
        transport.shut_down().await?;
    }
    Ok(())
}
/// Takes the active transport (if any) out of the map and shuts it down,
/// releasing the write lock before awaiting the shutdown.
pub(crate) async fn shutdown(&self) {
    let taken = {
        let mut slot = self.transport_map.write().await;
        slot.take()
    };
    if let Some(transport) = taken {
        let _ = transport.shut_down().await;
    }
}
/// Returns `true` when a standalone-stream transport is registered and has
/// not been shut down.
pub(crate) async fn default_stream_exists(&self) -> bool {
    let guard = self.transport_map.read().await;
    match guard.as_ref() {
        Some(transport) => !transport.is_shut_down().await,
        None => false,
    }
}
/// Runs the message loop for one server-side stream.
///
/// Registers `transport` as the standalone transport when `stream_id` is the
/// default stream, starts a keep-alive ping, optionally feeds an initial
/// client `payload` into the transport, then processes incoming client
/// messages until the stream ends (non-default streams) or the keep-alive
/// task reports a disconnect.
pub(crate) async fn start_stream(
    self: Arc<Self>,
    transport: Arc<
        dyn TransportDispatcher<
            ClientMessages,
            MessageFromServer,
            ClientMessage,
            ServerMessages,
            ServerMessage,
        >,
    >,
    stream_id: &str,
    ping_interval: Duration,
    payload: Option<String>,
) -> SdkResult<()> {
    let mut stream = transport.start().await?;
    if stream_id == DEFAULT_STREAM_ID {
        self.store_transport(stream_id, transport.clone()).await?;
    }
    let self_clone = self.clone();
    // `disconnect_rx` fires when the keep-alive task detects a dead peer.
    let (disconnect_tx, mut disconnect_rx) = oneshot::channel::<()>();
    let abort_alive_task = transport
        .keep_alive(ping_interval, disconnect_tx)
        .await?
        .abort_handle();
    // ensure keep_alive task will be aborted when this function returns
    let _abort_guard = AbortTaskOnDrop {
        handle: abort_alive_task,
    };
    // in case there is a payload, we consume it by transport to get processed
    // payload would be message payload coming from the client
    if let Some(payload) = payload {
        if let Err(err) = transport.consume_string_payload(&payload).await {
            let _ = self.remove_transport(stream_id).await;
            return Err(err.into());
        }
    }
    // Create a channel to collect results from spawned tasks
    let (tx, mut rx) = mpsc::channel(TASK_CHANNEL_CAPACITY);
    loop {
        tokio::select! {
            Some(mcp_messages) = stream.next() =>{
                match mcp_messages {
                    ClientMessages::Single(client_message) => {
                        let transport = transport.clone();
                        let self_clone = self.clone();
                        let tx = tx.clone();
                        // Handle each message on its own task so the stream is not blocked.
                        tokio::spawn(async move {
                            let result = self_clone.handle_message(client_message, &transport).await;
                            let send_result: SdkResult<_> = match result {
                                Ok(result) => {
                                    if let Some(result) = result {
                                        transport
                                            .send_message(ServerMessages::Single(result), None)
                                            .map_err(|e| e.into())
                                            .await
                                    } else {
                                        Ok(None)
                                    }
                                }
                                Err(error) => {
                                    // Handler errors are logged; they do not terminate the stream.
                                    tracing::error!("Error handling message : {}", error);
                                    Ok(None)
                                }
                            };
                            if let Err(error) = tx.send(send_result).await {
                                tracing::error!("Failed to send batch result to channel: {}", error);
                            }
                        });
                    }
                    ClientMessages::Batch(client_messages) => {
                        let transport = transport.clone();
                        let self_clone = self_clone.clone();
                        let tx = tx.clone();
                        tokio::spawn(async move {
                            let handling_tasks: Vec<_> = client_messages
                                .into_iter()
                                .map(|client_message| self_clone.handle_message(client_message, &transport))
                                .collect();
                            let send_result = match try_join_all(handling_tasks).await {
                                Ok(results) => {
                                    let results: Vec<_> = results.into_iter().flatten().collect();
                                    if !results.is_empty() {
                                        transport.send_message(ServerMessages::Batch(results), None)
                                            .map_err(|e| e.into())
                                            .await
                                    }else {
                                        Ok(None)
                                    }
                                },
                                Err(error) => Err(error),
                            };
                            if let Err(error) = tx.send(send_result).await {
                                tracing::error!("Failed to send batch result to channel: {}", error);
                            }
                        });
                    }
                }
                // Check for results from spawned tasks to propagate errors
                while let Ok(result) = rx.try_recv() {
                    result?; // Propagate errors
                }
                // close the stream after all messages are sent, unless it is a standalone stream
                if !stream_id.eq(DEFAULT_STREAM_ID){
                    // Drop tx to close the channel and collect remaining results
                    drop(tx);
                    while let Some(result) = rx.recv().await {
                        result?; // Propagate errors
                    }
                    return Ok(());
                }
            }
            _ = &mut disconnect_rx => {
                // Drop tx to close the channel and collect remaining results
                drop(tx);
                while let Some(result) = rx.recv().await {
                    result?; // Propagate errors
                }
                self.remove_transport(stream_id).await?;
                // Disconnection detected by keep-alive task
                return Err(SdkError::connection_closed().into());
            }
        }
    }
}
/// Builds a per-session `ServerRuntime` for the hyper server; no transport is
/// attached yet (it is registered later via `store_transport`).
#[cfg(feature = "hyper-server")]
pub(crate) fn new_instance(
    server_details: Arc<InitializeResult>,
    handler: Arc<dyn McpServerHandler>,
    session_id: SessionId,
    auth_info: Option<AuthInfo>,
    task_store: Option<Arc<ServerTaskStore>>,
    client_task_store: Option<Arc<ClientTaskStore>>,
) -> Arc<Self> {
    use tokio::sync::RwLock;
    let (client_details_tx, client_details_rx) =
        watch::channel::<Option<InitializeRequestParams>>(None);
    Arc::new(Self {
        server_details,
        handler,
        session_id: Some(session_id),
        transport_map: RwLock::new(None),
        client_details_tx,
        client_details_rx,
        request_id_gen: Box::new(RequestIdGenNumeric::new(None)),
        auth_info: RwLock::new(auth_info),
        task_store,
        client_task_store,
    })
}
/// Queries the client for the current status of `task_id`; once the task has
/// reached a terminal state, fetches its payload and records the outcome in
/// `task_store` under the given session.
pub(crate) async fn poll_task_status(
    self: Arc<ServerRuntime>,
    task_id: TaskId,
    session_id: Option<String>,
    task_store: Arc<ClientTaskStore>,
) -> SdkResult<TaskStatusUpdate> {
    let status_result = self
        .request_get_task(GetTaskParams {
            task_id: task_id.to_string(),
        })
        .await?;
    // Terminal tasks also carry a payload; persist it together with the status.
    if status_result.is_terminal() {
        let payload = self
            .request_get_task_payload(GetTaskPayloadParams {
                task_id: task_id.clone(),
            })
            .await?;
        task_store
            .store_task_result(
                task_id.as_str(),
                status_result.status,
                payload.into(),
                session_id.as_ref(),
            )
            .await;
    }
    Ok((status_result.status, status_result.poll_interval))
}
/// Builds a `ServerRuntime` from `options`, wiring up task-status
/// notifications and polling of server-initiated tasks before returning.
pub(crate) fn new<T>(options: McpServerOptions<T>) -> Arc<Self>
where
    T: TransportDispatcher<
        ClientMessages,
        MessageFromServer,
        ClientMessage,
        ServerMessages,
        ServerMessage,
    >,
{
    let (client_details_tx, client_details_rx) =
        watch::channel::<Option<InitializeRequestParams>>(None);
    let runtime = Arc::new(Self {
        server_details: Arc::new(options.server_details),
        handler: options.handler,
        #[cfg(feature = "hyper-server")]
        session_id: None,
        transport_map: tokio::sync::RwLock::new(Some(Arc::new(options.transport))),
        client_details_tx,
        client_details_rx,
        request_id_gen: Box::new(RequestIdGenNumeric::new(None)),
        auth_info: RwLock::new(None),
        task_store: options.task_store,
        client_task_store: options.client_task_store,
    });
    let runtime_clone = runtime.clone();
    if let Some(task_store) = runtime_clone.task_store() {
        // send TaskStatusNotification if task_store is present and supports subscribe()
        if let Some(mut stream) = task_store.subscribe() {
            tokio::spawn(async move {
                while let Some((params, _)) = stream.next().await {
                    let _ = runtime_clone.notify_task_status(params).await;
                }
            });
        }
    }
    // Task polling for server initiated tasks
    if let Some(client_task_store) = runtime.client_task_store.clone() {
        let task_store_clone = client_task_store.clone();
        let runtime_clone = runtime.clone();
        // Poller handed to the task store; it queries the client for status updates.
        let callback: TaskStatusPoller = Box::new(move |task_id, session_id| {
            let task_store_clone = client_task_store.clone();
            let runtime_clone = runtime_clone.clone();
            Box::pin(async move {
                runtime_clone
                    .poll_task_status(task_id, session_id, task_store_clone)
                    .await
            })
        });
        if let Err(error) = task_store_clone.start_task_polling(callback) {
            tracing::error!("Failed to start task polling: {error}");
        }
    }
    runtime
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime/mcp_client_runtime.rs | crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime/mcp_client_runtime.rs | use super::ClientRuntime;
use super::McpClientOptions;
#[cfg(feature = "streamable-http")]
use crate::task_store::ServerTaskStore;
use crate::task_store::TaskCreator;
use crate::{error::SdkResult, mcp_client::ClientHandler, mcp_traits::McpClientHandler, McpClient};
use crate::{
schema::{
schema_utils::{
ClientMessage, ClientMessages, MessageFromClient, NotificationFromServer,
ResultFromClient, ServerMessage, ServerMessages,
},
InitializeRequestParams, RpcError,
},
task_store::ClientTaskStore,
};
use async_trait::async_trait;
use rust_mcp_schema::schema_utils::ServerJsonrpcRequest;
#[cfg(feature = "streamable-http")]
use rust_mcp_transport::StreamableTransportOptions;
use rust_mcp_transport::TransportDispatcher;
use std::sync::Arc;
/// Creates a new MCP client runtime with the specified configuration.
///
/// This function initializes a Model Context Protocol (MCP) client from the
/// client details, transport, and client-side handler bundled in `options`.
///
/// The resulting `ClientRuntime` is wrapped in an `Arc` for shared ownership across threads.
///
/// # Arguments
/// * `options.client_details` - Client name, version and capabilities.
/// * `options.transport` - An implementation of the `TransportDispatcher` trait facilitating
///   communication with the MCP server.
/// * `options.handler` - An implementation of the `ClientHandler` trait that defines the
///   client's core behavior and response logic.
/// * `options.task_store` / `options.server_task_store` - Optional task stores.
///
/// # Returns
/// An `Arc<ClientRuntime>` representing the initialized client, enabling shared access and
/// asynchronous operation.
///
/// # Examples
/// You can find a detailed example of how to use this function in the repository:
///
/// [Repository Example](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/simple-mcp-client-stdio)
pub fn create_client<T>(options: McpClientOptions<T>) -> Arc<ClientRuntime>
where
    T: TransportDispatcher<
        ServerMessages,
        MessageFromClient,
        ServerMessage,
        ClientMessages,
        ClientMessage,
    >,
{
    Arc::new(ClientRuntime::new(
        options.client_details,
        Arc::new(options.transport),
        options.handler,
        options.task_store,
        options.server_task_store,
    ))
}
/// Creates an MCP client runtime that connects over streamable HTTP.
///
/// # Arguments
/// * `client_details` - Client name, version and capabilities.
/// * `transport_options` - Streamable-HTTP transport configuration.
/// * `handler` - Implementation of `ClientHandler` with the client's core logic.
/// * `task_store` - Optional store for tasks this client runs for the server.
/// * `server_task_store` - Optional store tracking server-side tasks.
#[cfg(feature = "streamable-http")]
pub fn with_transport_options(
    client_details: InitializeRequestParams,
    transport_options: StreamableTransportOptions,
    handler: impl ClientHandler,
    task_store: Option<Arc<ClientTaskStore>>,
    // Renamed from `servertask_store` for consistency with the rest of the
    // crate; parameters are positional, so call sites are unaffected.
    server_task_store: Option<Arc<ServerTaskStore>>,
) -> Arc<ClientRuntime> {
    Arc::new(ClientRuntime::new_instance(
        client_details,
        transport_options,
        Box::new(ClientInternalHandler::new(Box::new(handler))),
        task_store,
        server_task_store,
    ))
}
/// Internal handler that wraps a `ClientHandler` trait object.
/// This is used to handle incoming requests and notifications for the client.
pub(crate) struct ClientInternalHandler<H> {
    // The user-supplied handler all callbacks are delegated to.
    handler: H,
}
impl ClientInternalHandler<Box<dyn ClientHandler>> {
    /// Wraps `handler` so it can be driven by the client runtime.
    pub fn new(handler: Box<dyn ClientHandler>) -> Self {
        Self { handler }
    }
}
/// Implementation of the `McpClientHandler` trait for `ClientInternalHandler`.
/// This handles requests, notifications, and errors from the server by calling proper function of self.handler
#[async_trait]
impl McpClientHandler for ClientInternalHandler<Box<dyn ClientHandler>> {
/// Handles a request received from the server by passing the request to self.handler
async fn handle_request(
    &self,
    server_jsonrpc_request: ServerJsonrpcRequest,
    runtime: &dyn McpClient,
) -> std::result::Result<ResultFromClient, RpcError> {
    // Reject requests the client never declared support for.
    runtime
        .capabilities()
        .can_handle_request(&server_jsonrpc_request)?;
    // prepare a TaskCreator in case request is task augmented and client is configured with a task_store
    let task_creator = if server_jsonrpc_request.is_task_augmented() {
        let Some(task_store) = runtime.task_store() else {
            return Err(RpcError::invalid_request()
                .with_message("The server is not configured with a task store.".to_string()));
        };
        Some(TaskCreator {
            request_id: server_jsonrpc_request.request_id().to_owned(),
            request: server_jsonrpc_request.clone(),
            task_store,
            session_id: runtime.session_id().await,
        })
    } else {
        None
    };
    // Dispatch by request type to the matching handler callback.
    match server_jsonrpc_request {
        ServerJsonrpcRequest::PingRequest(request) => self
            .handler
            .handle_ping_request(request.params, runtime)
            .await
            .map(|value| value.into()),
        ServerJsonrpcRequest::CreateMessageRequest(request) => {
            // NOTE(review): unlike ElicitRequest below, the task-augmented
            // branch here does not consume `task_creator` — confirm that is
            // intended.
            if request.params.is_task_augmented() {
                self.handler
                    .handle_task_augmented_create_message(request.params, runtime)
                    .await
                    .map(|value| value.into())
            } else {
                self.handler
                    .handle_create_message_request(request.params, runtime)
                    .await
                    .map(|value| value.into())
            }
        }
        ServerJsonrpcRequest::ListRootsRequest(request) => self
            .handler
            .handle_list_roots_request(request.params, runtime)
            .await
            .map(|value| value.into()),
        ServerJsonrpcRequest::ElicitRequest(request) => {
            if request.params.is_task_augmented() {
                let Some(task_creator) = task_creator else {
                    return Err(RpcError::internal_error()
                        .with_message("Error creating a task!".to_string()));
                };
                self.handler
                    .handle_task_augmented_elicit_request(task_creator, request.params, runtime)
                    .await
                    .map(|value| value.into())
            } else {
                self.handler
                    .handle_elicit_request(request.params, runtime)
                    .await
                    .map(|value| value.into())
            }
        }
        ServerJsonrpcRequest::GetTaskRequest(request) => self
            .handler
            .handle_get_task_request(request.params, runtime)
            .await
            .map(|value| value.into()),
        ServerJsonrpcRequest::GetTaskPayloadRequest(request) => self
            .handler
            .handle_get_task_payload_request(request.params, runtime)
            .await
            .map(|value| value.into()),
        ServerJsonrpcRequest::CancelTaskRequest(request) => self
            .handler
            .handle_cancel_task_request(request.params, runtime)
            .await
            .map(|value| value.into()),
        ServerJsonrpcRequest::ListTasksRequest(request) => self
            .handler
            .handle_list_tasks_request(request.params, runtime)
            .await
            .map(|value| value.into()),
        ServerJsonrpcRequest::CustomRequest(custom_request) => self
            .handler
            .handle_custom_request(custom_request.into(), runtime)
            .await
            .map(|value| value.into()),
    }
}
/// Handles errors received from the server by passing the request to self.handler
async fn handle_error(
    &self,
    jsonrpc_error: &RpcError,
    runtime: &dyn McpClient,
) -> SdkResult<()> {
    // Delegate to the user handler; any handler error propagates via `?`.
    self.handler.handle_error(jsonrpc_error, runtime).await?;
    Ok(())
}
/// Handles notifications received from the server by passing the request to self.handler
async fn handle_notification(
&self,
server_jsonrpc_notification: NotificationFromServer,
runtime: &dyn McpClient,
) -> SdkResult<()> {
match server_jsonrpc_notification {
NotificationFromServer::CancelledNotification(cancelled_notification) => {
self.handler
.handle_cancelled_notification(cancelled_notification, runtime)
.await?;
}
NotificationFromServer::ProgressNotification(progress_notification) => {
self.handler
.handle_progress_notification(progress_notification, runtime)
.await?;
}
NotificationFromServer::ResourceListChangedNotification(
resource_list_changed_notification,
) => {
self.handler
.handle_resource_list_changed_notification(
resource_list_changed_notification,
runtime,
)
.await?;
}
NotificationFromServer::ResourceUpdatedNotification(resource_updated_notification) => {
self.handler
.handle_resource_updated_notification(resource_updated_notification, runtime)
.await?;
}
NotificationFromServer::PromptListChangedNotification(
prompt_list_changed_notification,
) => {
self.handler
.handle_prompt_list_changed_notification(
prompt_list_changed_notification,
runtime,
)
.await?;
}
NotificationFromServer::ToolListChangedNotification(tool_list_changed_notification) => {
self.handler
.handle_tool_list_changed_notification(tool_list_changed_notification, runtime)
.await?;
}
NotificationFromServer::LoggingMessageNotification(logging_message_notification) => {
self.handler
.handle_logging_message_notification(logging_message_notification, runtime)
.await?;
}
NotificationFromServer::TaskStatusNotification(task_status_notification) => {
self.handler
.handle_task_status_notification(task_status_notification, runtime)
.await?;
}
NotificationFromServer::ElicitationCompleteNotification(
elicitation_complete_notification,
) => {
self.handler
.handle_elicitation_complete_notification(
elicitation_complete_notification,
runtime,
)
.await?;
}
// Handles custom notifications received from the server by passing the request to self.handler
NotificationFromServer::CustomNotification(custom_notification) => {
self.handler
.handle_custom_notification(custom_notification, runtime)
.await?;
}
}
Ok(())
}
/// Handles process errors received from the server over stderr
async fn handle_process_error(
&self,
error_message: String,
runtime: &dyn McpClient,
) -> SdkResult<()> {
self.handler
.handle_process_error(error_message, runtime)
.await
.map_err(|err| err.into())
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime/mcp_client_runtime_core.rs | crates/rust-mcp-sdk/src/mcp_runtimes/client_runtime/mcp_client_runtime_core.rs | use super::ClientRuntime;
use super::McpClientOptions;
use crate::schema::{
schema_utils::{
ClientMessage, ClientMessages, MessageFromClient, NotificationFromServer, ResultFromClient,
ServerMessage, ServerMessages,
},
InitializeRequestParams, RpcError,
};
#[cfg(feature = "streamable-http")]
use crate::task_store::ClientTaskStore;
#[cfg(feature = "streamable-http")]
use crate::task_store::ServerTaskStore;
use crate::{
error::SdkResult,
mcp_handlers::mcp_client_handler_core::ClientHandlerCore,
mcp_traits::{McpClient, McpClientHandler},
};
use async_trait::async_trait;
use rust_mcp_schema::schema_utils::ServerJsonrpcRequest;
#[cfg(feature = "streamable-http")]
use rust_mcp_transport::StreamableTransportOptions;
use rust_mcp_transport::TransportDispatcher;
use std::sync::Arc;
/// Creates a new MCP client runtime from a bundled set of options.
///
/// Takes a `McpClientOptions<T>` containing everything needed to run the client:
/// - `client_details`: client name, version, and capabilities.
/// - `transport`: a `TransportDispatcher` implementation used to talk to the MCP server.
/// - `handler`: the client handler defining the client's behavior and response logic.
/// - `task_store` / `server_task_store`: optional task storage for asynchronous operations.
///
/// # Returns
///
/// An `Arc<ClientRuntime>` ready for shared ownership and asynchronous operation.
///
/// # Examples
///
/// You can find a detailed example of how to use this function in the repository:
///
/// [Repository Example](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/simple-mcp-client-stdio-core)
pub fn create_client<T>(options: McpClientOptions<T>) -> Arc<ClientRuntime>
where
    T: TransportDispatcher<
        ServerMessages,
        MessageFromClient,
        ServerMessage,
        ClientMessages,
        ClientMessage,
    >,
{
    // The transport is shared behind an Arc so the runtime can hand it to
    // concurrent tasks.
    let transport = Arc::new(options.transport);
    let runtime = ClientRuntime::new(
        options.client_details,
        transport,
        options.handler,
        options.task_store,
        options.server_task_store,
    );
    Arc::new(runtime)
}
#[cfg(feature = "streamable-http")]
/// Builds a [`ClientRuntime`] from streamable-HTTP transport options and a
/// core handler.
///
/// The user-supplied `ClientHandlerCore` is wrapped in the internal adapter
/// before being handed to the runtime.
pub fn with_transport_options(
    client_details: InitializeRequestParams,
    transport_options: StreamableTransportOptions,
    handler: impl ClientHandlerCore,
    task_store: Option<Arc<ClientTaskStore>>,
    servertask_store: Option<Arc<ServerTaskStore>>,
) -> Arc<ClientRuntime> {
    let internal_handler = ClientCoreInternalHandler::new(Box::new(handler));
    let runtime = ClientRuntime::new_instance(
        client_details,
        transport_options,
        Box::new(internal_handler),
        task_store,
        servertask_store,
    );
    Arc::new(runtime)
}
/// Internal adapter that bridges a user-provided [`ClientHandlerCore`] to the
/// [`McpClientHandler`] interface required by the client runtime.
pub(crate) struct ClientCoreInternalHandler<H> {
    // The wrapped user handler all calls are delegated to.
    handler: H,
}
impl ClientCoreInternalHandler<Box<dyn ClientHandlerCore>> {
    /// Wraps the given boxed `ClientHandlerCore`.
    pub fn new(handler: Box<dyn ClientHandlerCore>) -> Self {
        Self { handler }
    }
}
#[async_trait]
impl McpClientHandler for ClientCoreInternalHandler<Box<dyn ClientHandlerCore>> {
    /// Forwards an incoming server request to the wrapped core handler and
    /// returns whatever result it produces.
    async fn handle_request(
        &self,
        server_jsonrpc_request: ServerJsonrpcRequest,
        runtime: &dyn McpClient,
    ) -> std::result::Result<ResultFromClient, RpcError> {
        self.handler.handle_request(server_jsonrpc_request, runtime).await
    }
    /// Forwards a JSON-RPC error received from the server to the wrapped handler.
    async fn handle_error(
        &self,
        jsonrpc_error: &RpcError,
        runtime: &dyn McpClient,
    ) -> SdkResult<()> {
        self.handler.handle_error(jsonrpc_error, runtime).await?;
        Ok(())
    }
    /// Forwards a notification received from the server to the wrapped handler.
    async fn handle_notification(
        &self,
        server_jsonrpc_notification: NotificationFromServer,
        runtime: &dyn McpClient,
    ) -> SdkResult<()> {
        self.handler
            .handle_notification(server_jsonrpc_notification, runtime)
            .await?;
        Ok(())
    }
    /// Forwards stderr output from the server process to the wrapped handler,
    /// converting its error type into the SDK error type.
    async fn handle_process_error(
        &self,
        error_message: String,
        runtime: &dyn McpClient,
    ) -> SdkResult<()> {
        self.handler
            .handle_process_error(error_message, runtime)
            .await
            .map_err(Into::into)
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes/server_runtime/mcp_server_runtime.rs | crates/rust-mcp-sdk/src/mcp_runtimes/server_runtime/mcp_server_runtime.rs | use super::ServerRuntime;
#[cfg(feature = "hyper-server")]
use crate::{
auth::AuthInfo,
task_store::{ClientTaskStore, ServerTaskStore},
};
use crate::{
error::SdkResult,
mcp_handlers::mcp_server_handler::ServerHandler,
mcp_traits::{McpServer, McpServerHandler},
task_store::TaskCreator,
};
use crate::{
mcp_runtimes::server_runtime::McpServerOptions,
schema::{
schema_utils::{
CallToolError, ClientMessage, ClientMessages, MessageFromServer, ResultFromServer,
ServerMessage, ServerMessages,
},
CallToolResult, InitializeResult, RpcError,
},
};
use async_trait::async_trait;
use rust_mcp_schema::schema_utils::{ClientJsonrpcNotification, ClientJsonrpcRequest};
#[cfg(feature = "hyper-server")]
use rust_mcp_transport::SessionId;
use rust_mcp_transport::TransportDispatcher;
use std::sync::Arc;
/// Creates a new MCP server runtime with the specified configuration.
///
/// This function initializes a server for (MCP) from a bundled
/// `McpServerOptions<T>` value. The resulting `ServerRuntime` manages the
/// server's operation and communication with MCP clients.
///
/// # Arguments
/// * `options` - A `McpServerOptions<T>` carrying the server details (name,
///   version, capabilities), a `Transport` implementation facilitating
///   communication with MCP clients, and an implementation of the
///   `ServerHandler` trait that defines the server's core behavior and
///   response logic.
///
/// # Returns
/// An `Arc<ServerRuntime>` representing the initialized server, ready for asynchronous operation.
///
/// # Examples
/// You can find a detailed example of how to use this function in the repository:
///
/// [Repository Example](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/hello-world-mcp-server-stdio)
pub fn create_server<T>(options: McpServerOptions<T>) -> Arc<ServerRuntime>
where
    T: TransportDispatcher<
        ClientMessages,
        MessageFromServer,
        ClientMessage,
        ServerMessages,
        ServerMessage,
    >,
{
    ServerRuntime::new(options)
}
/// Builds a per-session [`ServerRuntime`] for the Hyper-based server.
///
/// Shares the server details and handler across sessions; the `session_id`
/// ties the runtime to one client session. Optional auth info and task
/// stores are passed through to the runtime.
#[cfg(feature = "hyper-server")]
pub(crate) fn create_server_instance(
    server_details: Arc<InitializeResult>,
    handler: Arc<dyn McpServerHandler>,
    session_id: SessionId,
    auth_info: Option<AuthInfo>,
    task_store: Option<Arc<ServerTaskStore>>,
    client_task_store: Option<Arc<ClientTaskStore>>,
) -> Arc<ServerRuntime> {
    ServerRuntime::new_instance(
        server_details,
        handler,
        session_id,
        auth_info,
        task_store,
        client_task_store,
    )
}
/// Internal adapter that bridges a user-provided [`ServerHandler`] to the
/// [`McpServerHandler`] interface required by the server runtime.
pub(crate) struct ServerRuntimeInternalHandler<H> {
    // The wrapped user handler all calls are delegated to.
    handler: H,
}
impl ServerRuntimeInternalHandler<Box<dyn ServerHandler>> {
    /// Wraps the given boxed `ServerHandler`.
    pub fn new(handler: Box<dyn ServerHandler>) -> Self {
        Self { handler }
    }
}
#[async_trait]
impl McpServerHandler for ServerRuntimeInternalHandler<Box<dyn ServerHandler>> {
    /// Dispatches an incoming client request to the matching typed callback on
    /// the wrapped [`ServerHandler`].
    ///
    /// For task-augmented requests, a [`TaskCreator`] is prepared first (after
    /// validating that the server advertises task support and has a task
    /// store); the request is then checked against the server's declared
    /// capabilities before dispatch.
    async fn handle_request(
        &self,
        client_jsonrpc_request: ClientJsonrpcRequest,
        runtime: Arc<dyn McpServer>,
    ) -> std::result::Result<ResultFromServer, RpcError> {
        // prepare a TaskCreator in case request is task augmented and server is configured with a task_store
        let task_creator = if client_jsonrpc_request.is_task_augmented() {
            if !runtime.capabilities().can_run_task_augmented_tools() {
                return Err(RpcError::invalid_request()
                    .with_message("This MCP server does not support \"tasks\".".to_string()));
            }
            let Some(task_store) = runtime.task_store() else {
                return Err(RpcError::invalid_request()
                    .with_message("The server is not configured with a task store.".to_string()));
            };
            // Session IDs only exist for the hyper-server (HTTP) transport;
            // stdio builds have no session concept.
            let session_id = {
                #[cfg(feature = "hyper-server")]
                {
                    runtime.session_id()
                }
                #[cfg(not(feature = "hyper-server"))]
                {
                    None
                }
            };
            Some(TaskCreator {
                request_id: client_jsonrpc_request.request_id().to_owned(),
                request: client_jsonrpc_request.clone(),
                session_id,
                task_store,
            })
        } else {
            None
        };
        // Reject requests the declared server capabilities cannot serve.
        runtime
            .capabilities()
            .can_handle_request(&client_jsonrpc_request)?;
        match client_jsonrpc_request {
            ClientJsonrpcRequest::InitializeRequest(initialize_request) => self
                .handler
                .handle_initialize_request(initialize_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::PingRequest(ping_request) => self
                .handler
                .handle_ping_request(ping_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::ListResourcesRequest(list_resources_request) => self
                .handler
                .handle_list_resources_request(list_resources_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::ListResourceTemplatesRequest(list_resource_templates_request) => {
                self.handler
                    .handle_list_resource_templates_request(
                        list_resource_templates_request.params,
                        runtime,
                    )
                    .await
                    .map(|value| value.into())
            }
            ClientJsonrpcRequest::ReadResourceRequest(read_resource_request) => self
                .handler
                .handle_read_resource_request(read_resource_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::SubscribeRequest(subscribe_request) => self
                .handler
                .handle_subscribe_request(subscribe_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::UnsubscribeRequest(unsubscribe_request) => self
                .handler
                .handle_unsubscribe_request(unsubscribe_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::ListPromptsRequest(list_prompts_request) => self
                .handler
                .handle_list_prompts_request(list_prompts_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::GetPromptRequest(prompt_request) => self
                .handler
                .handle_get_prompt_request(prompt_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::ListToolsRequest(list_tools_request) => self
                .handler
                .handle_list_tools_request(list_tools_request.params, runtime)
                .await
                .map(|value| value.into()),
            // Tool-call failures are surfaced as CallToolResult error payloads
            // rather than JSON-RPC errors, so the arm always returns Ok(...).
            ClientJsonrpcRequest::CallToolRequest(call_tool_request) => {
                let result = if call_tool_request.is_task_augmented() {
                    let Some(task_creator) = task_creator else {
                        return Err(CallToolError::from_message("Error creating a task!").into());
                    };
                    self.handler
                        .handle_task_augmented_tool_call(
                            call_tool_request.params,
                            task_creator,
                            runtime,
                        )
                        .await
                        .map_or_else(
                            |err| {
                                let result: CallToolResult = CallToolError::new(err).into();
                                result.into()
                            },
                            Into::into,
                        )
                } else {
                    self.handler
                        .handle_call_tool_request(call_tool_request.params, runtime)
                        .await
                        .map_or_else(
                            |err| {
                                let result: CallToolResult = CallToolError::new(err).into();
                                result.into()
                            },
                            Into::into,
                        )
                };
                Ok(result)
            }
            ClientJsonrpcRequest::SetLevelRequest(set_level_request) => self
                .handler
                .handle_set_level_request(set_level_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::CompleteRequest(complete_request) => self
                .handler
                .handle_complete_request(complete_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::GetTaskRequest(get_task_request) => self
                .handler
                .handle_get_task_request(get_task_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::GetTaskPayloadRequest(get_task_payload_request) => self
                .handler
                .handle_get_task_payload_request(get_task_payload_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::CancelTaskRequest(cancel_task_request) => self
                .handler
                .handle_cancel_task_request(cancel_task_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::ListTasksRequest(list_tasks_request) => self
                .handler
                .handle_list_task_request(list_tasks_request.params, runtime)
                .await
                .map(|value| value.into()),
            ClientJsonrpcRequest::CustomRequest(custom_request) => self
                .handler
                .handle_custom_request(custom_request.into(), runtime)
                .await
                .map(|value| value.into()),
        }
    }
    /// Forwards a JSON-RPC error received from the client to the wrapped handler.
    async fn handle_error(
        &self,
        jsonrpc_error: &RpcError,
        runtime: Arc<dyn McpServer>,
    ) -> SdkResult<()> {
        self.handler.handle_error(jsonrpc_error, runtime).await?;
        Ok(())
    }
    /// Dispatches a notification received from the client to the matching
    /// typed callback on the wrapped handler.
    async fn handle_notification(
        &self,
        client_jsonrpc_notification: ClientJsonrpcNotification,
        runtime: Arc<dyn McpServer>,
    ) -> SdkResult<()> {
        match client_jsonrpc_notification {
            ClientJsonrpcNotification::CancelledNotification(cancelled_notification) => {
                self.handler
                    .handle_cancelled_notification(cancelled_notification.params, runtime)
                    .await?;
            }
            ClientJsonrpcNotification::InitializedNotification(initialized_notification) => {
                self.handler
                    .handle_initialized_notification(
                        initialized_notification.params,
                        runtime.clone(),
                    )
                    .await?;
                // Fire the user-facing hook once initialization is acknowledged.
                self.handler.on_initialized(runtime).await;
            }
            ClientJsonrpcNotification::ProgressNotification(progress_notification) => {
                self.handler
                    .handle_progress_notification(progress_notification.params, runtime)
                    .await?;
            }
            ClientJsonrpcNotification::RootsListChangedNotification(
                roots_list_changed_notification,
            ) => {
                self.handler
                    .handle_roots_list_changed_notification(
                        roots_list_changed_notification.params,
                        runtime,
                    )
                    .await?;
            }
            ClientJsonrpcNotification::TaskStatusNotification(task_status_notification) => {
                self.handler
                    .handle_task_status_notification(task_status_notification.params, runtime)
                    .await?;
            }
            ClientJsonrpcNotification::CustomNotification(value) => {
                self.handler
                    .handle_custom_notification(value.into())
                    .await?;
            }
        }
        Ok(())
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/mcp_runtimes/server_runtime/mcp_server_runtime_core.rs | crates/rust-mcp-sdk/src/mcp_runtimes/server_runtime/mcp_server_runtime_core.rs | use super::ServerRuntime;
use crate::error::SdkResult;
use crate::mcp_handlers::mcp_server_handler_core::ServerHandlerCore;
use crate::mcp_runtimes::server_runtime::McpServerOptions;
use crate::mcp_traits::{McpServer, McpServerHandler};
use crate::schema::schema_utils::{
ClientMessage, MessageFromServer, ResultFromServer, ServerMessage,
};
use crate::schema::{
schema_utils::{ClientMessages, ServerMessages},
RpcError,
};
use async_trait::async_trait;
use rust_mcp_schema::schema_utils::{ClientJsonrpcNotification, ClientJsonrpcRequest};
use rust_mcp_transport::TransportDispatcher;
use std::sync::Arc;
/// Creates a new MCP server runtime with the specified configuration.
///
/// This function initializes a server for (MCP) from a bundled
/// `McpServerOptions<T>` value. The resulting `ServerRuntime` manages the
/// server's operation and communication with MCP clients.
///
/// # Arguments
/// * `options` - A `McpServerOptions<T>` carrying the server details (name,
///   version, capabilities), a `Transport` implementation facilitating
///   communication with MCP clients, and an implementation of the
///   `ServerHandlerCore` trait that defines the server's core behavior and
///   response logic.
///
/// # Returns
/// An `Arc<ServerRuntime>` representing the initialized server, ready for asynchronous operation.
///
/// # Examples
/// You can find a detailed example of how to use this function in the repository:
///
/// [Repository Example](https://github.com/rust-mcp-stack/rust-mcp-sdk/tree/main/examples/hello-world-mcp-server-stdio-core)
pub fn create_server<T>(options: McpServerOptions<T>) -> Arc<ServerRuntime>
where
    T: TransportDispatcher<
        ClientMessages,
        MessageFromServer,
        ClientMessage,
        ServerMessages,
        ServerMessage,
    >,
{
    ServerRuntime::new(options)
}
/// Internal adapter that bridges a user-provided [`ServerHandlerCore`] to the
/// [`McpServerHandler`] interface required by the server runtime.
pub(crate) struct RuntimeCoreInternalHandler<H> {
    // The wrapped user handler all calls are delegated to.
    handler: H,
}
impl RuntimeCoreInternalHandler<Box<dyn ServerHandlerCore>> {
    /// Wraps the given boxed `ServerHandlerCore`.
    pub fn new(handler: Box<dyn ServerHandlerCore>) -> Self {
        Self { handler }
    }
}
#[async_trait]
impl McpServerHandler for RuntimeCoreInternalHandler<Box<dyn ServerHandlerCore>> {
    /// Forwards an incoming client request to the wrapped [`ServerHandlerCore`].
    ///
    /// For `initialize` requests the client details are captured on the runtime
    /// first, so later code can inspect client info and capabilities.
    async fn handle_request(
        &self,
        client_jsonrpc_request: ClientJsonrpcRequest,
        runtime: Arc<dyn McpServer>,
    ) -> std::result::Result<ResultFromServer, RpcError> {
        // store the client details if the request is a client initialization request
        if let ClientJsonrpcRequest::InitializeRequest(initialize_request) = &client_jsonrpc_request
        {
            // keep a copy of the InitializeRequestParams which includes client_info and capabilities
            runtime
                .set_client_details(initialize_request.params.clone())
                .await
                .map_err(|err| RpcError::internal_error().with_message(format!("{err}")))?;
        }
        // handle request and get the result
        self.handler
            .handle_request(client_jsonrpc_request.into(), runtime)
            .await
    }
    /// Forwards a JSON-RPC error received from the client to the wrapped handler.
    async fn handle_error(
        &self,
        jsonrpc_error: &RpcError,
        runtime: Arc<dyn McpServer>,
    ) -> SdkResult<()> {
        self.handler.handle_error(jsonrpc_error, runtime).await?;
        Ok(())
    }
    /// Forwards a notification received from the client to the wrapped handler.
    ///
    /// NOTE(review): `on_initialized` fires *before* the notification itself is
    /// handed to the handler here, whereas the non-core runtime fires it after
    /// `handle_initialized_notification` — confirm this ordering is intentional.
    async fn handle_notification(
        &self,
        client_jsonrpc_notification: ClientJsonrpcNotification,
        runtime: Arc<dyn McpServer>,
    ) -> SdkResult<()> {
        // Trigger the `on_initialized()` callback if an `initialized_notification` is received from the client.
        if client_jsonrpc_notification.is_initialized_notification() {
            self.handler.on_initialized(runtime.clone()).await;
        }
        // handle notification
        self.handler
            .handle_notification(client_jsonrpc_notification.into(), runtime)
            .await?;
        Ok(())
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/id_generator/uuid_generator.rs | crates/rust-mcp-sdk/src/id_generator/uuid_generator.rs | use crate::mcp_traits::IdGenerator;
use uuid::Uuid;
/// An [`IdGenerator`] implementation backed by version-4 (random) UUIDs.
///
/// Random UUIDs are extremely unlikely to collide and are hard to predict,
/// which makes this generator a good fit for identifiers such as `SessionId`
/// or any other value where uniqueness matters.
pub struct UuidGenerator;
impl<T> IdGenerator<T> for UuidGenerator
where
    T: From<String>,
{
    /// Returns a fresh identifier built from the string form of a new UUID v4.
    fn generate(&self) -> T {
        let id = Uuid::new_v4().to_string();
        id.into()
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/id_generator/fast_id_generator.rs | crates/rust-mcp-sdk/src/id_generator/fast_id_generator.rs | use crate::mcp_traits::IdGenerator;
use base64::Engine;
use std::sync::atomic::{AtomicU64, Ordering};
/// An [`IdGenerator`] implementation optimized for lightweight, locally-scoped identifiers.
///
/// This generator produces short, incrementing identifiers that are Base64-encoded.
/// This makes it well-suited for cases such as `StreamId` generation, where:
/// - IDs only need to be unique within a single process or session
/// - Predictability is acceptable
/// - Shorter, more human-readable identifiers are desirable
///
pub struct FastIdGenerator {
    /// Monotonically increasing counter; the source of uniqueness within this process.
    counter: AtomicU64,
    /// Optional prefix for readability (empty string when no prefix was given).
    prefix: &'static str,
}
impl FastIdGenerator {
/// Creates a new ID generator with an optional prefix.
///
/// # Arguments
/// * `prefix` - A static string to prepend to IDs (e.g., "sid_").
pub fn new(prefix: Option<&'static str>) -> Self {
FastIdGenerator {
counter: AtomicU64::new(0),
prefix: prefix.unwrap_or_default(),
}
}
}
impl<T> IdGenerator<T> for FastIdGenerator
where
    T: From<String>,
{
    /// Generates the next identifier.
    ///
    /// Atomically increments the internal counter and encodes its
    /// little-endian bytes as URL-safe, unpadded Base64. Encoding all 8 bytes
    /// of a `u64` yields an 11-character string, to which the optional prefix
    /// is prepended.
    ///
    /// # Returns
    /// * `T` - the encoded identifier, converted from `String` (e.g., "sid_AAAAAAAAAAA").
    fn generate(&self) -> T {
        let next = self.counter.fetch_add(1, Ordering::Relaxed);
        let encoded =
            base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(next.to_le_bytes());
        let id = match self.prefix {
            "" => encoded,
            prefix => format!("{prefix}{encoded}"),
        };
        T::from(id)
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/utils/capability_checks.rs | crates/rust-mcp-sdk/src/utils/capability_checks.rs | /// Formats an assertion error message for unsupported capabilities.
/// Builds the error message used when a required capability is unsupported.
///
/// # Arguments
/// - `entity`: The party lacking support (e.g. "Server" or "Client").
/// - `capability`: The name of the unsupported capability (e.g. "resources").
/// - `method_name`: The method that requires the capability.
///
/// # Returns
/// A string of the form `"{entity} does not support {capability} (required for {method_name})"`.
///
/// # Examples
/// ```ignore
/// let msg = create_unsupported_capability_message("Server", "resources", "resources/list");
/// assert_eq!(msg, "Server does not support resources (required for resources/list)");
/// ```
fn create_unsupported_capability_message(
    entity: &str,
    capability: &str,
    method_name: &str,
) -> String {
    [
        entity,
        " does not support ",
        capability,
        " (required for ",
        method_name,
        ")",
    ]
    .concat()
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/hyper_server.rs | crates/rust-mcp-sdk/src/hyper_servers/hyper_server.rs | use super::{HyperServer, HyperServerOptions};
use crate::mcp_traits::McpServerHandler;
use crate::schema::InitializeResult;
use std::sync::Arc;
/// Creates a new HyperServer instance with the provided handler and options.
///
/// The handler is a shared [`McpServerHandler`] (as produced by wrapping a
/// `ServerHandler` or `ServerHandlerCore` implementation in the runtime's
/// internal adapters).
///
/// # Arguments
/// * `server_details` - Initialization result from the MCP schema
/// * `handler` - Shared implementation of the `McpServerHandler` trait
/// * `server_options` - Configuration options for the HyperServer
///
/// # Returns
/// * `HyperServer` - A configured HyperServer instance ready to start
pub fn create_server(
    server_details: InitializeResult,
    handler: Arc<dyn McpServerHandler + 'static>,
    server_options: HyperServerOptions,
) -> HyperServer {
    HyperServer::new(server_details, handler, server_options)
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/error.rs | crates/rust-mcp-sdk/src/hyper_servers/error.rs | use std::net::AddrParseError;
use axum::{http::StatusCode, response::IntoResponse};
use thiserror::Error;
#[cfg(feature = "auth")]
use crate::auth::AuthenticationError;
/// Convenience alias for results produced by the HTTP transport server layer.
pub type TransportServerResult<T> = core::result::Result<T, TransportServerError>;
/// Errors that can occur in the Hyper/axum-based MCP transport server.
///
/// The `#[error(...)]` attributes define the `Display` text for each variant.
#[derive(Debug, Error, Clone)]
pub enum TransportServerError {
    /// The request lacked the required `sessionId` query-string parameter.
    #[error("'sessionId' query string is missing!")]
    SessionIdMissing,
    /// No active session matches the supplied session ID.
    #[error("No session found for the given ID: {0}.")]
    SessionIdInvalid(String),
    /// I/O failure on a message/event stream.
    #[error("Stream IO Error: {0}.")]
    StreamIoError(String),
    /// The configured listen address could not be parsed.
    #[error("{0}")]
    AddrParseError(#[from] AddrParseError),
    /// Generic HTTP-level failure.
    #[error("{0}")]
    HttpError(String),
    /// The server failed to start.
    #[error("Server start error: {0}")]
    ServerStartError(String),
    /// The provided server options are invalid.
    #[error("Invalid options: {0}")]
    InvalidServerOptions(String),
    /// SSL/TLS certificate problem.
    #[error("{0}")]
    SslCertError(String),
    /// Underlying transport failure.
    #[error("{0}")]
    TransportError(String),
    /// Authentication failure (available only with the `auth` feature).
    #[cfg(feature = "auth")]
    #[error("{0}")]
    AuthenticationError(#[from] AuthenticationError),
}
impl IntoResponse for TransportServerError {
    /// Maps any transport-server error to a 500 Internal Server Error response,
    /// stashing the error value in the response extensions so that layers
    /// further up the stack can inspect the concrete failure.
    fn into_response(self) -> axum::response::Response {
        let mut res = StatusCode::INTERNAL_SERVER_ERROR.into_response();
        res.extensions_mut().insert(self);
        res
    }
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/hyper_runtime.rs | crates/rust-mcp-sdk/src/hyper_servers/hyper_runtime.rs | use crate::{
error::SdkResult,
mcp_server::{
error::{TransportServerError, TransportServerResult},
ServerRuntime,
},
session_store::SessionStore,
task_store::{ClientTaskStore, ServerTaskStore, TaskStatusPoller},
};
use crate::{
mcp_http::McpAppState,
mcp_server::HyperServer,
schema::{
schema_utils::{NotificationFromServer, RequestFromServer, ResultFromClient},
CreateMessageRequestParams, CreateMessageResult, InitializeRequestParams, ListRootsResult,
LoggingMessageNotificationParams, NotificationParams, RequestParams,
ResourceUpdatedNotificationParams,
},
McpServer,
};
use axum_server::Handle;
use futures::StreamExt;
use rust_mcp_schema::{
schema_utils::{ClientTaskResult, CustomNotification, CustomRequest},
CancelTaskParams, CancelTaskResult, CancelledNotificationParams, CreateTaskResult,
ElicitCompleteParams, ElicitRequestParams, ElicitResult, GenericResult, GetTaskParams,
GetTaskPayloadParams, GetTaskResult, ProgressNotificationParams, RpcError,
TaskStatusNotificationParams,
};
use rust_mcp_transport::SessionId;
use std::{sync::Arc, time::Duration};
use tokio::task::JoinHandle;
/// Handle to a running Hyper-based MCP server.
///
/// Bundles the shared application state, the spawned server task, and the
/// axum-server handle used for shutdown control.
pub struct HyperRuntime {
    /// Shared application state (session store, task stores, ...).
    pub(crate) state: Arc<McpAppState>,
    /// Spawned task driving the HTTP(S) server; resolves when the server exits.
    pub(crate) server_task: JoinHandle<Result<(), TransportServerError>>,
    /// Handle used to trigger (graceful) shutdown of the server.
    pub(crate) server_handle: Handle,
}
impl HyperRuntime {
fn task_poller_callback(
client_task_store: Arc<ClientTaskStore>,
session_store: Arc<dyn SessionStore>,
) -> TaskStatusPoller {
let session_store = session_store.clone();
let task_store_clone = client_task_store.clone();
let callback: TaskStatusPoller = Box::new(move |task_id, session_id| {
let session_store_clone = session_store.clone();
let task_store_clone = task_store_clone.clone();
Box::pin(async move {
let Some(session) = session_id.as_ref() else {
return Err(RpcError::invalid_request()
.with_message("No session id provided!".to_string())
.into());
};
let Some(runtime) = session_store_clone.get(session).await else {
return Err(RpcError::invalid_request()
.with_message("Invalid or broken session!".to_string())
.into());
};
runtime
.poll_task_status(task_id, session_id, task_store_clone)
.await
})
});
callback
}
    /// Starts the given [`HyperServer`] and returns a runtime handle for it.
    ///
    /// Spawns the HTTP (or HTTPS, with the `ssl` feature) server task, wires up
    /// task-status notification forwarding when a subscribable task store is
    /// configured, and starts status polling for server-initiated tasks when a
    /// client task store is configured.
    ///
    /// # Panics
    /// Panics inside the spawned task if `enable_ssl` is set but the crate was
    /// built without the `ssl` feature.
    pub async fn create(server: HyperServer) -> SdkResult<Self> {
        let addr = server.options.resolve_server_address().await?;
        let state = server.state();
        let server_handle = server.server_handle();
        let server_task = tokio::spawn(async move {
            #[cfg(feature = "ssl")]
            if server.options.enable_ssl {
                server.start_ssl(addr).await
            } else {
                server.start_http(addr).await
            }
            #[cfg(not(feature = "ssl"))]
            if server.options.enable_ssl {
                panic!("SSL requested but the 'ssl' feature is not enabled");
            } else {
                server.start_http(addr).await
            }
        });
        // send a TaskStatusNotification if task_store is present and supports subscribe()
        let state_clone = state.clone();
        if let Some(task_store) = state_clone.task_store.clone() {
            if let Some(mut stream) = task_store.subscribe() {
                tokio::spawn(async move {
                    while let Some((params, session_id_opt)) = stream.next().await {
                        // Only forward when the update is tied to a known session.
                        if let Some(session_id) = session_id_opt.as_ref() {
                            if let Some(transport) = state_clone.session_store.get(session_id).await
                            {
                                // Best effort: a failed notification is dropped.
                                let _ = transport.notify_task_status(params).await;
                            }
                        }
                    }
                });
            }
        }
        // Task polling for server initiated tasks
        if let Some(client_task_store) = state.client_task_store.clone() {
            let session_store = state.session_store.clone();
            let callback: TaskStatusPoller =
                Self::task_poller_callback(Arc::clone(&client_task_store), session_store);
            client_task_store.start_task_polling(callback)?;
        }
        Ok(Self {
            state,
            server_task,
            server_handle,
        })
    }
    /// Initiates a graceful shutdown of the underlying HTTP server.
    ///
    /// `timeout` bounds how long in-flight connections are given to finish;
    /// `None` presumably waits indefinitely — see
    /// `axum_server::Handle::graceful_shutdown` for the exact semantics.
    pub fn graceful_shutdown(&self, timeout: Option<Duration>) {
        self.server_handle.graceful_shutdown(timeout);
    }
pub async fn await_server(self) -> SdkResult<()> {
let result = self.server_task.await?;
result.map_err(|err| err.into())
}
    /// Returns the IDs of all sessions currently tracked by the session store.
    pub async fn sessions(&self) -> Vec<String> {
        self.state.session_store.keys().await
    }
/// Retrieves the runtime associated with the given session ID from the session store.
pub async fn runtime_by_session(
&self,
session_id: &SessionId,
) -> TransportServerResult<Arc<ServerRuntime>> {
self.state.session_store.get(session_id).await.ok_or(
TransportServerError::SessionIdInvalid(session_id.to_string()),
)
}
/// Sends a request to the client and processes the response.
///
/// This function sends a `RequestFromServer` message to the client, waits for the response,
/// and handles the result. If the response is empty or of an invalid type, an error is returned.
/// Otherwise, it returns the result from the client.
pub async fn send_request(
&self,
session_id: &SessionId,
request: RequestFromServer,
timeout: Option<Duration>,
) -> SdkResult<ResultFromClient> {
let runtime = self.runtime_by_session(session_id).await?;
runtime.request(request, timeout).await
}
/// Sends a one-way notification from the server to the client of the given session.
///
/// Unlike [`Self::send_request`], no response is awaited.
pub async fn send_notification(
    &self,
    session_id: &SessionId,
    notification: NotificationFromServer,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.send_notification(notification).await
}
/// Returns the client's initialization parameters for the given session.
///
/// Presumably this is the `InitializeRequestParams` captured during the MCP
/// initialize handshake; `None` when the session has not completed
/// initialization — TODO confirm against `ServerRuntime::client_info`.
pub async fn client_info(
    &self,
    session_id: &SessionId,
) -> SdkResult<Option<InitializeRequestParams>> {
    let runtime = self.runtime_by_session(session_id).await?;
    Ok(runtime.client_info())
}
/*******************
 Requests
*******************/

/// Sends an elicitation request to the client to prompt user input and returns the received response.
///
/// The requested_schema argument allows servers to define the structure of the expected response using a restricted subset of JSON Schema.
/// To simplify client user experience, elicitation schemas are limited to flat objects with primitive properties only
pub async fn request_elicitation(
    &self,
    session_id: &SessionId,
    params: ElicitRequestParams,
) -> SdkResult<ElicitResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_elicitation(params).await
}

/// Sends a task-augmented elicitation request to the client.
///
/// Unlike [`Self::request_elicitation`], this returns a `CreateTaskResult`
/// immediately; the elicitation outcome is delivered later through the task
/// lifecycle (status updates / task result retrieval).
pub async fn request_elicitation_task(
    &self,
    session_id: &SessionId,
    params: ElicitRequestParams,
) -> SdkResult<CreateTaskResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_elicitation_task(params).await
}

/// Request a list of root URIs from the client. Roots allow
/// servers to ask for specific directories or files to operate on. A common example
/// for roots is providing a set of repositories or directories a server should operate on.
/// This request is typically used when the server needs to understand the file system
/// structure or access specific locations that the client has permission to read from
pub async fn request_root_list(
    &self,
    session_id: &SessionId,
    params: Option<RequestParams>,
) -> SdkResult<ListRootsResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_root_list(params).await
}

/// A ping request to check that the other party is still alive.
/// The receiver must promptly respond, or else may be disconnected.
///
/// This function creates a `PingRequest` with no specific parameters, sends the request and awaits the response
/// Once the response is received, it attempts to convert it into the expected
/// result type.
///
/// # Returns
/// A `SdkResult` containing the `rust_mcp_schema::Result` if the request is successful.
/// If the request or conversion fails, an error is returned.
pub async fn ping(
    &self,
    session_id: &SessionId,
    params: Option<RequestParams>,
    timeout: Option<Duration>,
) -> SdkResult<crate::schema::Result> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.ping(params, timeout).await
}

/// A request from the server to sample an LLM via the client.
/// The client has full discretion over which model to select.
/// The client should also inform the user before beginning sampling,
/// to allow them to inspect the request (human in the loop)
/// and decide whether to approve it.
pub async fn request_message_creation(
    &self,
    session_id: &SessionId,
    params: CreateMessageRequestParams,
) -> SdkResult<CreateMessageResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_message_creation(params).await
}

/// Send a request to retrieve the state of a task.
pub async fn request_get_task(
    &self,
    session_id: &SessionId,
    params: GetTaskParams,
) -> SdkResult<GetTaskResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_get_task(params).await
}

/// Send a request to retrieve the result of a completed task.
pub async fn request_get_task_payload(
    &self,
    session_id: &SessionId,
    params: GetTaskPayloadParams,
) -> SdkResult<ClientTaskResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_get_task_payload(params).await
}

/// Send a request to cancel a task.
pub async fn request_task_cancellation(
    &self,
    session_id: &SessionId,
    params: CancelTaskParams,
) -> SdkResult<CancelTaskResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_task_cancellation(params).await
}

/// Send a custom request with a custom method name and params.
///
/// The response is returned as a `GenericResult`; interpreting it is up to the caller.
pub async fn request_custom(
    &self,
    session_id: &SessionId,
    params: CustomRequest,
) -> SdkResult<GenericResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_custom(params).await
}
/*******************
 Notifications
*******************/

/// Send log message notification from server to client.
/// If no logging/setLevel request has been sent from the client, the server MAY decide which messages to send automatically.
pub async fn notify_log_message(
    &self,
    session_id: &SessionId,
    params: LoggingMessageNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_log_message(params).await
}

/// Send an optional notification from the server to the client, informing it that
/// the list of prompts it offers has changed.
/// This may be issued by servers without any previous subscription from the client.
pub async fn notify_prompt_list_changed(
    &self,
    session_id: &SessionId,
    params: Option<NotificationParams>,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_prompt_list_changed(params).await
}

/// Send an optional notification from the server to the client,
/// informing it that the list of resources it can read from has changed.
/// This may be issued by servers without any previous subscription from the client.
pub async fn notify_resource_list_changed(
    &self,
    session_id: &SessionId,
    params: Option<NotificationParams>,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_resource_list_changed(params).await
}

/// Send a notification from the server to the client, informing it that
/// a resource has changed and may need to be read again.
/// This should only be sent if the client previously sent a resources/subscribe request.
pub async fn notify_resource_updated(
    &self,
    session_id: &SessionId,
    params: ResourceUpdatedNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_resource_updated(params).await
}

/// Send an optional notification from the server to the client, informing it that
/// the list of tools it offers has changed.
/// This may be issued by servers without any previous subscription from the client.
pub async fn notify_tool_list_changed(
    &self,
    session_id: &SessionId,
    params: Option<NotificationParams>,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_tool_list_changed(params).await
}

/// This notification can be sent to indicate that it is cancelling a previously-issued request.
/// The request SHOULD still be in-flight, but due to communication latency, it is always possible that this notification MAY arrive after the request has already finished.
/// This notification indicates that the result will be unused, so any associated processing SHOULD cease.
/// A client MUST NOT attempt to cancel its initialize request.
/// For task cancellation, use the tasks/cancel request instead of this notification.
pub async fn notify_cancellation(
    &self,
    session_id: &SessionId,
    params: CancelledNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_cancellation(params).await
}

/// Send an out-of-band notification used to inform the receiver of a progress update for a long-running request.
pub async fn notify_progress(
    &self,
    session_id: &SessionId,
    params: ProgressNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_progress(params).await
}

/// Send an optional notification from the receiver to the requestor, informing them that a task's status has changed.
/// Receivers are not required to send these notifications.
pub async fn notify_task_status(
    &self,
    session_id: &SessionId,
    params: TaskStatusNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_task_status(params).await
}

/// An optional notification from the server to the client, informing it of a completion of an out-of-band elicitation request.
pub async fn notify_elicitation_completed(
    &self,
    session_id: &SessionId,
    params: ElicitCompleteParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_elicitation_completed(params).await
}

/// Send a custom notification with a custom method name and params.
pub async fn notify_custom(
    &self,
    session_id: &SessionId,
    params: CustomNotification,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_custom(params).await
}
/// Deprecated alias for [`Self::request_root_list`].
#[deprecated(since = "0.8.0", note = "Use `request_root_list()` instead.")]
pub async fn list_roots(
    &self,
    session_id: &SessionId,
    params: Option<RequestParams>,
) -> SdkResult<ListRootsResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_root_list(params).await
}

/// Deprecated alias for [`Self::request_elicitation`].
#[deprecated(since = "0.8.0", note = "Use `request_elicitation()` instead.")]
pub async fn elicit_input(
    &self,
    session_id: &SessionId,
    params: ElicitRequestParams,
) -> SdkResult<ElicitResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_elicitation(params).await
}

/// Deprecated alias for [`Self::request_message_creation`].
#[deprecated(since = "0.8.0", note = "Use `request_message_creation()` instead.")]
pub async fn create_message(
    &self,
    session_id: &SessionId,
    params: CreateMessageRequestParams,
) -> SdkResult<CreateMessageResult> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.request_message_creation(params).await
}

/// Deprecated alias for [`Self::notify_tool_list_changed`].
#[deprecated(since = "0.8.0", note = "Use `notify_tool_list_changed()` instead.")]
pub async fn send_tool_list_changed(
    &self,
    session_id: &SessionId,
    params: Option<NotificationParams>,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_tool_list_changed(params).await
}

/// Deprecated alias for [`Self::notify_resource_updated`].
#[deprecated(since = "0.8.0", note = "Use `notify_resource_updated()` instead.")]
pub async fn send_resource_updated(
    &self,
    session_id: &SessionId,
    params: ResourceUpdatedNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_resource_updated(params).await
}

/// Deprecated alias for [`Self::notify_resource_list_changed`].
#[deprecated(
    since = "0.8.0",
    note = "Use `notify_resource_list_changed()` instead."
)]
pub async fn send_resource_list_changed(
    &self,
    session_id: &SessionId,
    params: Option<NotificationParams>,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_resource_list_changed(params).await
}

/// Deprecated alias for [`Self::notify_prompt_list_changed`].
#[deprecated(since = "0.8.0", note = "Use `notify_prompt_list_changed()` instead.")]
pub async fn send_prompt_list_changed(
    &self,
    session_id: &SessionId,
    params: Option<NotificationParams>,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_prompt_list_changed(params).await
}

/// Deprecated alias for [`Self::notify_log_message`].
#[deprecated(since = "0.8.0", note = "Use `notify_log_message()` instead.")]
pub async fn send_logging_message(
    &self,
    session_id: &SessionId,
    params: LoggingMessageNotificationParams,
) -> SdkResult<()> {
    let runtime = self.runtime_by_session(session_id).await?;
    runtime.notify_log_message(params).await
}
/// Returns the server-side task store, if one was configured.
pub fn task_store(&self) -> Option<Arc<ServerTaskStore>> {
    self.state.task_store.clone()
}

/// Returns the client task store (for server-initiated tasks), if one was configured.
pub fn client_task_store(&self) -> Option<Arc<ClientTaskStore>> {
    self.state.client_task_store.clone()
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/hyper_server_core.rs | crates/rust-mcp-sdk/src/hyper_servers/hyper_server_core.rs | use super::{HyperServer, HyperServerOptions};
use crate::mcp_traits::McpServerHandler;
use crate::schema::InitializeResult;
use std::sync::Arc;
/// Creates a new HyperServer instance with the provided handler and options.
///
/// NOTE(review): the original docs said the handler "must implement
/// ServerHandlerCore", but the signature accepts any `McpServerHandler`
/// trait object — presumably a `ServerHandlerCore` wrapper in this module's
/// intended usage; confirm against the callers.
///
/// # Arguments
/// * `server_details` - Initialization result from the MCP schema
/// * `handler` - Shared `McpServerHandler` trait object driving the server
/// * `server_options` - Configuration options for the HyperServer
///
/// # Returns
/// * `HyperServer` - A configured HyperServer instance ready to start
pub fn create_server(
    server_details: InitializeResult,
    handler: Arc<dyn McpServerHandler + 'static>,
    server_options: HyperServerOptions,
) -> HyperServer {
    HyperServer::new(server_details, handler, server_options)
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/server.rs | crates/rust-mcp-sdk/src/hyper_servers/server.rs | use super::{
error::{TransportServerError, TransportServerResult},
routes::app_routes,
};
#[cfg(feature = "auth")]
use crate::auth::AuthProvider;
#[cfg(feature = "auth")]
use crate::mcp_http::middleware::AuthMiddleware;
use crate::{
error::SdkResult,
id_generator::{FastIdGenerator, UuidGenerator},
mcp_http::{
http_utils::{
DEFAULT_MESSAGES_ENDPOINT, DEFAULT_SSE_ENDPOINT, DEFAULT_STREAMABLE_HTTP_ENDPOINT,
},
middleware::DnsRebindProtector,
McpAppState, McpHttpHandler,
},
mcp_server::hyper_runtime::HyperRuntime,
mcp_traits::{IdGenerator, McpServerHandler},
session_store::InMemorySessionStore,
task_store::{ClientTaskStore, ServerTaskStore},
};
use crate::{mcp_http::Middleware, schema::InitializeResult};
use axum::Router;
#[cfg(feature = "ssl")]
use axum_server::tls_rustls::RustlsConfig;
use axum_server::Handle;
use rust_mcp_transport::{event_store::EventStore, SessionId, TransportOptions};
use std::{
net::{SocketAddr, ToSocketAddrs},
path::Path,
sync::Arc,
time::Duration,
};
use tokio::signal;
// Default client ping interval (12 seconds)
const DEFAULT_CLIENT_PING_INTERVAL: Duration = Duration::from_secs(12);
// Seconds granted to in-flight connections during graceful shutdown.
// NOTE(review): constant name has a typo ("TMEOUT" -> "TIMEOUT"); renaming
// requires updating its use in `shutdown_signal` as well.
const GRACEFUL_SHUTDOWN_TMEOUT_SECS: u64 = 5;
/// Configuration struct for the Hyper server
/// Used to configure the HyperServer instance.
pub struct HyperServerOptions {
    /// Hostname or IP address the server will bind to (default: "127.0.0.1")
    pub host: String,
    /// TCP port number the server will listen on (default: 8080)
    pub port: u16,
    /// Optional thread-safe session id generator to generate unique session IDs.
    pub session_id_generator: Option<Arc<dyn IdGenerator<SessionId>>>,
    /// Optional custom path for the Streamable HTTP endpoint (default: `/mcp`)
    pub custom_streamable_http_endpoint: Option<String>,
    /// Shared transport configuration used by the server
    pub transport_options: Arc<TransportOptions>,
    /// Event store for resumability support
    /// If provided, resumability will be enabled, allowing clients to reconnect and resume messages
    pub event_store: Option<Arc<dyn EventStore>>,
    /// Task store for handling incoming task-augmented requests from the client.
    /// In other words, for tasks executed on this server.
    ///
    /// When the server receives a task-augmented request (e.g., on `tools/call` or other supported methods),
    /// it uses this store to create, manage, and track the lifecycle of the task. This includes generating
    /// unique task IDs, storing task state, enforcing TTL, and providing status/results via `tasks/get`,
    /// `tasks/result`, etc.
    ///
    /// See the MCP tasks specification for details:
    /// <https://modelcontextprotocol.io/specification/2025-11-25/basic/utilities/tasks>
    pub task_store: Option<Arc<ServerTaskStore>>,
    /// Task store for managing outgoing task-augmented requests sent to the client.
    /// In other words, for tasks executed on the client.
    ///
    /// When server (acting as requestor) sends a task-augmented request to the client, it uses this store
    /// to track the task ID, poll for status updates using `tasks/get` (respecting the suggested `pollInterval`),
    /// retrieve results via `tasks/result` once completed.
    ///
    /// Polling continues until the task reaches a terminal status (`completed`, `failed`, or `cancelled`).
    ///
    /// See the MCP tasks specification for details:
    /// <https://modelcontextprotocol.io/specification/2025-11-25/basic/utilities/tasks>
    pub client_task_store: Option<Arc<ClientTaskStore>>,
    /// This setting only applies to streamable HTTP.
    /// If true, the server will return JSON responses instead of starting an SSE stream.
    /// This can be useful for simple request/response scenarios without streaming.
    /// Default is false (SSE streams are preferred).
    pub enable_json_response: Option<bool>,
    /// Interval between automatic ping messages sent to clients to detect disconnects
    pub ping_interval: Duration,
    /// Enables SSL/TLS if set to `true`
    pub enable_ssl: bool,
    /// Path to the SSL/TLS certificate file (e.g., "cert.pem").
    /// Required if `enable_ssl` is `true`.
    pub ssl_cert_path: Option<String>,
    /// Path to the SSL/TLS private key file (e.g., "key.pem").
    /// Required if `enable_ssl` is `true`.
    pub ssl_key_path: Option<String>,
    /// List of allowed host header values for DNS rebinding protection.
    /// If not specified, host validation is disabled.
    pub allowed_hosts: Option<Vec<String>>,
    /// List of allowed origin header values for DNS rebinding protection.
    /// If not specified, origin validation is disabled.
    pub allowed_origins: Option<Vec<String>>,
    /// Enable DNS rebinding protection (requires allowedHosts and/or allowedOrigins to be configured).
    /// Default is false for backwards compatibility.
    pub dns_rebinding_protection: bool,
    /// If set to true, the SSE transport will also be supported for backward compatibility (default: true)
    pub sse_support: bool,
    /// Optional custom path for the Server-Sent Events (SSE) endpoint (default: `/sse`)
    /// Applicable only if sse_support is true
    pub custom_sse_endpoint: Option<String>,
    /// Optional custom path for the MCP messages endpoint for sse (default: `/messages`)
    /// Applicable only if sse_support is true
    pub custom_messages_endpoint: Option<String>,
    /// Optional authentication provider for protecting MCP server.
    #[cfg(feature = "auth")]
    pub auth: Option<Arc<dyn AuthProvider>>,
}
impl HyperServerOptions {
    /// Validates the server configuration options
    ///
    /// Ensures that SSL-related paths are provided and valid when SSL is enabled.
    ///
    /// # Returns
    /// * `TransportServerResult<()>` - Ok if validation passes, Err with TransportServerError if invalid
    pub fn validate(&self) -> TransportServerResult<()> {
        if self.enable_ssl {
            // Both paths must be present before checking the filesystem.
            if self.ssl_cert_path.is_none() || self.ssl_key_path.is_none() {
                return Err(TransportServerError::InvalidServerOptions(
                    "Both 'ssl_cert_path' and 'ssl_key_path' must be provided when SSL is enabled."
                        .into(),
                ));
            }
            if !Path::new(self.ssl_cert_path.as_deref().unwrap_or("")).is_file() {
                return Err(TransportServerError::InvalidServerOptions(
                    "'ssl_cert_path' does not point to a valid or existing file.".into(),
                ));
            }
            if !Path::new(self.ssl_key_path.as_deref().unwrap_or("")).is_file() {
                return Err(TransportServerError::InvalidServerOptions(
                    "'ssl_key_path' does not point to a valid or existing file.".into(),
                ));
            }
        }
        Ok(())
    }

    /// Resolves the server address from host and port
    ///
    /// Validates the configuration and converts the host/port into a SocketAddr.
    /// Handles scheme prefixes (http:// or https://) and logs warnings for mismatches.
    ///
    /// # Returns
    /// * `TransportServerResult<SocketAddr>` - The resolved server address or an error
    pub(crate) async fn resolve_server_address(&self) -> TransportServerResult<SocketAddr> {
        self.validate()?;
        // Strip a leading scheme so DNS resolution sees a bare hostname.
        let mut host = self.host.to_string();
        if let Some(stripped) = self.host.strip_prefix("http://") {
            if self.enable_ssl {
                tracing::warn!("Warning: Ignoring http:// scheme for SSL; using hostname only");
            }
            host = stripped.to_string();
        } else if let Some(stripped) = host.strip_prefix("https://") {
            host = stripped.to_string();
        }
        let addr = {
            // First try DNS resolution of (host, port); if that yields nothing,
            // fall back to parsing "host:port" as a literal socket address.
            // NOTE(review): the fallback uses the original `self.host`, which may
            // still carry a scheme prefix stripped above — confirm intended.
            let mut iter = (host, self.port)
                .to_socket_addrs()
                .map_err(|err| TransportServerError::ServerStartError(err.to_string()))?;
            match iter.next() {
                Some(addr) => addr,
                None => format!("{}:{}", self.host, self.port).parse().map_err(
                    |err: std::net::AddrParseError| {
                        TransportServerError::ServerStartError(err.to_string())
                    },
                )?,
            }
        };
        Ok(addr)
    }

    /// Returns the server's base URL (e.g. `https://127.0.0.1:8080`);
    /// scheme is derived from `enable_ssl`, host and port are used verbatim.
    pub fn base_url(&self) -> String {
        format!(
            "{}://{}:{}",
            if self.enable_ssl { "https" } else { "http" },
            self.host,
            self.port
        )
    }

    /// Full URL of the Streamable HTTP endpoint.
    pub fn streamable_http_url(&self) -> String {
        format!("{}{}", self.base_url(), self.streamable_http_endpoint())
    }

    /// Full URL of the SSE endpoint.
    pub fn sse_url(&self) -> String {
        format!("{}{}", self.base_url(), self.sse_endpoint())
    }

    /// Full URL of the SSE messages endpoint.
    pub fn sse_message_url(&self) -> String {
        format!("{}{}", self.base_url(), self.sse_messages_endpoint())
    }

    /// SSE endpoint path; custom value or the default (`/sse`).
    pub fn sse_endpoint(&self) -> &str {
        self.custom_sse_endpoint
            .as_deref()
            .unwrap_or(DEFAULT_SSE_ENDPOINT)
    }

    /// SSE messages endpoint path; custom value or the default (`/messages`).
    pub fn sse_messages_endpoint(&self) -> &str {
        self.custom_messages_endpoint
            .as_deref()
            .unwrap_or(DEFAULT_MESSAGES_ENDPOINT)
    }

    /// Streamable HTTP endpoint path; custom value or the default (`/mcp`).
    pub fn streamable_http_endpoint(&self) -> &str {
        self.custom_streamable_http_endpoint
            .as_deref()
            .unwrap_or(DEFAULT_STREAMABLE_HTTP_ENDPOINT)
    }

    /// True when DNS rebinding protection is enabled AND at least one of
    /// `allowed_hosts` / `allowed_origins` is configured (the flag alone is not enough).
    pub fn needs_dns_protection(&self) -> bool {
        self.dns_rebinding_protection
            && (self.allowed_hosts.is_some() || self.allowed_origins.is_some())
    }
}
/// Default implementation for HyperServerOptions
///
/// Provides default values for the server configuration, including 127.0.0.1 address,
/// port 8080, default Streamable HTTP endpoint, and 12-second ping interval.
impl Default for HyperServerOptions {
    fn default() -> Self {
        Self {
            host: "127.0.0.1".to_string(),
            port: 8080,
            custom_sse_endpoint: None,
            custom_streamable_http_endpoint: None,
            custom_messages_endpoint: None,
            ping_interval: DEFAULT_CLIENT_PING_INTERVAL,
            transport_options: Default::default(),
            enable_ssl: false,
            ssl_cert_path: None,
            ssl_key_path: None,
            session_id_generator: None,
            enable_json_response: None,
            // SSE transport is enabled by default for backward compatibility.
            sse_support: true,
            allowed_hosts: None,
            allowed_origins: None,
            dns_rebinding_protection: false,
            event_store: None,
            #[cfg(feature = "auth")]
            auth: None,
            task_store: None,
            client_task_store: None,
        }
    }
}
/// Hyper server struct for managing the Axum-based web server
pub struct HyperServer {
    // Fully wired Axum router serving the MCP endpooints and any user-added routes.
    app: Router,
    // Shared application state (sessions, handler, transport options, ...).
    state: Arc<McpAppState>,
    // Options the server was constructed with.
    pub(crate) options: HyperServerOptions,
    // axum-server handle used to trigger graceful shutdown.
    handle: Handle,
}
impl HyperServer {
    /// Creates a new HyperServer instance
    ///
    /// Initializes the server with the provided server details, handler, and options.
    ///
    /// # Arguments
    /// * `server_details` - Initialization result from the MCP schema
    /// * `handler` - Shared MCP server handler with static lifetime
    /// * `server_options` - Server configuration options
    ///
    /// # Returns
    /// * `Self` - A new HyperServer instance
    pub(crate) fn new(
        server_details: InitializeResult,
        handler: Arc<dyn McpServerHandler + 'static>,
        mut server_options: HyperServerOptions,
    ) -> Self {
        let state: Arc<McpAppState> = Arc::new(McpAppState {
            session_store: Arc::new(InMemorySessionStore::new()),
            // Use the caller-supplied generator when present, else a UUID generator.
            // NOTE(review): `map_or` builds the default UuidGenerator eagerly even
            // when a custom generator is supplied; `map_or_else` would defer it.
            id_generator: server_options
                .session_id_generator
                .take()
                .map_or(Arc::new(UuidGenerator {}), |g| Arc::clone(&g)),
            stream_id_gen: Arc::new(FastIdGenerator::new(Some("s_"))),
            server_details: Arc::new(server_details),
            handler,
            ping_interval: server_options.ping_interval,
            transport_options: Arc::clone(&server_options.transport_options),
            enable_json_response: server_options.enable_json_response.unwrap_or(false),
            event_store: server_options.event_store.as_ref().map(Arc::clone),
            task_store: server_options.task_store.take(),
            client_task_store: server_options.client_task_store.take(),
        });
        // populate middlewares
        let mut middlewares: Vec<Arc<dyn Middleware>> = vec![];
        if server_options.needs_dns_protection() {
            // DNS rebinding protection middleware
            middlewares.push(Arc::new(DnsRebindProtector::new(
                server_options.allowed_hosts.take(),
                server_options.allowed_origins.take(),
            )));
        }
        let http_handler = {
            #[cfg(feature = "auth")]
            {
                let auth_provider = server_options.auth.take();
                // add auth middleware if there is an auth_provider
                if let Some(auth_provider) = auth_provider.as_ref() {
                    middlewares.push(Arc::new(AuthMiddleware::new(auth_provider.clone())))
                }
                McpHttpHandler::new(auth_provider, middlewares)
            }
            #[cfg(not(feature = "auth"))]
            McpHttpHandler::new(middlewares)
        };
        let app = app_routes(Arc::clone(&state), &server_options, http_handler);
        Self {
            app,
            state,
            options: server_options,
            handle: Handle::new(),
        }
    }

    /// Returns a shared reference to the application state
    ///
    /// # Returns
    /// * `Arc<McpAppState>` - Shared application state
    pub fn state(&self) -> Arc<McpAppState> {
        Arc::clone(&self.state)
    }

    /// Adds a new route to the server
    ///
    /// # Arguments
    /// * `path` - The route path (static string)
    /// * `route` - The Axum MethodRouter for handling the route
    ///
    /// # Returns
    /// * `Self` - The modified HyperServer instance
    pub fn with_route(mut self, path: &'static str, route: axum::routing::MethodRouter) -> Self {
        self.app = self.app.route(path, route);
        self
    }

    /// Generates server information string
    ///
    /// Constructs a string describing the server type, protocol, address, and SSE endpoint.
    ///
    /// # Arguments
    /// * `addr` - Optional SocketAddr; if None, resolves from options
    ///
    /// # Returns
    /// * `TransportServerResult<String>` - The server information string or an error
    pub async fn server_info(&self, addr: Option<SocketAddr>) -> TransportServerResult<String> {
        let addr = addr.unwrap_or(self.options.resolve_server_address().await?);
        let server_type = if self.options.enable_ssl {
            "SSL server"
        } else {
            "Server"
        };
        let protocol = if self.options.enable_ssl {
            "https"
        } else {
            "http"
        };
        let mut server_url = format!(
            "\n• Streamable HTTP {} is available at {}://{}{}",
            server_type,
            protocol,
            addr,
            self.options.streamable_http_endpoint()
        );
        // Advertise the SSE endpoint as well when SSE support is compiled in and enabled.
        #[cfg(feature = "sse")]
        if self.options.sse_support {
            let sse_url = format!(
                "\n• SSE {} is available at {}://{}{}",
                server_type,
                protocol,
                addr,
                self.options.sse_endpoint()
            );
            server_url.push_str(&sse_url);
        };
        Ok(server_url)
    }

    /// Returns the options this server was configured with.
    pub fn options(&self) -> &HyperServerOptions {
        &self.options
    }

    // pub fn with_layer<L>(mut self, layer: L) -> Self
    // where
    //     // L: Layer<axum::body::Body> + Clone + Send + Sync + 'static,
    //     L::Service: Send + Sync + 'static,
    // {
    //     self.router = self.router.layer(layer);
    //     self
    // }

    /// Starts the server with SSL support (available when "ssl" feature is enabled)
    ///
    /// # Arguments
    /// * `addr` - The server address to bind to
    ///
    /// # Returns
    /// * `TransportServerResult<()>` - Ok if the server starts successfully, Err otherwise
    #[cfg(feature = "ssl")]
    pub(crate) async fn start_ssl(self, addr: SocketAddr) -> TransportServerResult<()> {
        let config = RustlsConfig::from_pem_file(
            self.options.ssl_cert_path.as_deref().unwrap_or_default(),
            self.options.ssl_key_path.as_deref().unwrap_or_default(),
        )
        .await
        .map_err(|err| TransportServerError::SslCertError(err.to_string()))?;
        tracing::info!("{}", self.server_info(Some(addr)).await?);
        // Spawn a task to trigger shutdown on signal
        let handle_clone = self.handle.clone();
        let state_clone = self.state().clone();
        tokio::spawn(async move {
            shutdown_signal(handle_clone, state_clone).await;
        });
        let handle_clone = self.handle.clone();
        axum_server::bind_rustls(addr, config)
            .handle(handle_clone)
            .serve(self.app.into_make_service())
            .await
            .map_err(|err| TransportServerError::ServerStartError(err.to_string()))
    }

    /// Returns server handle that could be used for graceful shutdown
    pub fn server_handle(&self) -> Handle {
        self.handle.clone()
    }

    /// Starts the server without SSL
    ///
    /// # Arguments
    /// * `addr` - The server address to bind to
    ///
    /// # Returns
    /// * `TransportServerResult<()>` - Ok if the server starts successfully, Err otherwise
    pub(crate) async fn start_http(self, addr: SocketAddr) -> TransportServerResult<()> {
        tracing::info!("{}", self.server_info(Some(addr)).await?);
        // Spawn a task to trigger shutdown on signal
        let handle_clone = self.handle.clone();
        tokio::spawn(async move {
            shutdown_signal(handle_clone, self.state.clone()).await;
        });
        let handle_clone = self.handle.clone();
        axum_server::bind(addr)
            .handle(handle_clone)
            .serve(self.app.into_make_service())
            .await
            .map_err(|err| TransportServerError::ServerStartError(err.to_string()))
    }

    /// Starts the server, choosing SSL or HTTP based on configuration
    ///
    /// Resolves the server address and starts the server in either SSL or HTTP mode.
    /// Panics if SSL is requested but the "ssl" feature is not enabled.
    ///
    /// # Returns
    /// * `SdkResult<()>` - Ok if the server starts successfully, Err otherwise
    pub async fn start(self) -> SdkResult<()> {
        let runtime = HyperRuntime::create(self).await?;
        runtime.await_server().await
    }

    /// Similar to start() , but returns a HyperRuntime after server started
    ///
    /// HyperRuntime could be used to access sessions and send server initiated messages if needed
    ///
    /// # Returns
    /// * `SdkResult<HyperRuntime>` - Ok if the server starts successfully, Err otherwise
    pub async fn start_runtime(self) -> SdkResult<HyperRuntime> {
        HyperRuntime::create(self).await
    }
}
// Shutdown signal handler: waits for Ctrl+C (or SIGTERM on unix), clears all
// active sessions, then triggers a time-limited graceful shutdown.
async fn shutdown_signal(handle: Handle, state: Arc<McpAppState>) {
    // Wait for a Ctrl+C or SIGTERM signal
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("Failed to install Ctrl+C handler");
    };
    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("Failed to install signal handler")
            .recv()
            .await;
    };
    // Non-unix targets have no SIGTERM; use a future that never resolves so
    // the select! below only reacts to Ctrl+C.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();
    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }
    tracing::info!("Signal received, starting graceful shutdown");
    // Drop all tracked sessions before closing the listener.
    state.session_store.clear().await;
    // Trigger graceful shutdown with a timeout
    handle.graceful_shutdown(Some(Duration::from_secs(GRACEFUL_SHUTDOWN_TMEOUT_SECS)));
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::NamedTempFile;
#[test]
fn test_server_options_base_url_custom() {
let options = HyperServerOptions {
host: String::from("127.0.0.1"),
port: 8081,
enable_ssl: true,
..Default::default()
};
assert_eq!(options.base_url(), "https://127.0.0.1:8081");
}
#[test]
fn test_server_options_streamable_http_custom() {
let options = HyperServerOptions {
custom_streamable_http_endpoint: Some(String::from("/abcd/mcp")),
host: String::from("127.0.0.1"),
port: 8081,
enable_ssl: true,
..Default::default()
};
assert_eq!(
options.streamable_http_url(),
"https://127.0.0.1:8081/abcd/mcp"
);
assert_eq!(options.streamable_http_endpoint(), "/abcd/mcp");
}
#[test]
fn test_server_options_sse_custom() {
let options = HyperServerOptions {
custom_sse_endpoint: Some(String::from("/abcd/sse")),
host: String::from("127.0.0.1"),
port: 8081,
enable_ssl: true,
..Default::default()
};
assert_eq!(options.sse_url(), "https://127.0.0.1:8081/abcd/sse");
assert_eq!(options.sse_endpoint(), "/abcd/sse");
}
#[test]
fn test_server_options_sse_messages_custom() {
let options = HyperServerOptions {
custom_messages_endpoint: Some(String::from("/abcd/messages")),
..Default::default()
};
assert_eq!(
options.sse_message_url(),
"http://127.0.0.1:8080/abcd/messages"
);
assert_eq!(options.sse_messages_endpoint(), "/abcd/messages");
}
#[test]
fn test_server_options_needs_dns_protection() {
let options = HyperServerOptions::default();
// should be false by default
assert!(!options.needs_dns_protection());
// should still be false unless allowed_hosts or allowed_origins are also provided
let options = HyperServerOptions {
dns_rebinding_protection: true,
..Default::default()
};
assert!(!options.needs_dns_protection());
// should be true when dns_rebinding_protection is true and allowed_hosts is provided
let options = HyperServerOptions {
dns_rebinding_protection: true,
allowed_hosts: Some(vec![String::from("127.0.0.1")]),
..Default::default()
};
assert!(options.needs_dns_protection());
// should be true when dns_rebinding_protection is true and allowed_origins is provided
let options = HyperServerOptions {
dns_rebinding_protection: true,
allowed_origins: Some(vec![String::from("http://127.0.0.1:8080")]),
..Default::default()
};
assert!(options.needs_dns_protection());
}
#[test]
fn test_server_options_validate() {
    // Defaults (SSL off) validate cleanly.
    let opts = HyperServerOptions::default();
    assert!(opts.validate().is_ok());

    // SSL enabled without any cert/key paths must fail validation.
    let opts = HyperServerOptions {
        enable_ssl: true,
        ..Default::default()
    };
    assert!(opts.validate().is_err());

    // SSL enabled with paths that do not exist must fail validation.
    let opts = HyperServerOptions {
        enable_ssl: true,
        ssl_cert_path: Some("/invalid/path/to/cert.pem".to_string()),
        ssl_key_path: Some("/invalid/path/to/key.pem".to_string()),
        ..Default::default()
    };
    assert!(opts.validate().is_err());

    // SSL enabled with real (temporary) cert/key files validates.
    let cert_file =
        NamedTempFile::with_suffix(".pem").expect("Expected to create test cert file");
    let key_file =
        NamedTempFile::with_suffix(".pem").expect("Expected to create test key file");
    let cert_path = cert_file
        .path()
        .to_str()
        .expect("Expected to get cert path")
        .to_string();
    let key_path = key_file
        .path()
        .to_str()
        .expect("Expected to get key path")
        .to_string();
    let opts = HyperServerOptions {
        enable_ssl: true,
        ssl_cert_path: Some(cert_path),
        ssl_key_path: Some(key_path),
        ..Default::default()
    };
    assert!(opts.validate().is_ok());
}
#[tokio::test]
async fn test_server_options_resolve_server_address() {
    // Default host/port resolve.
    let opts = HyperServerOptions::default();
    assert!(opts.resolve_server_address().await.is_ok());

    // A plain IP host resolves.
    let opts = HyperServerOptions {
        host: "8.6.7.5".to_string(),
        port: 309,
        ..Default::default()
    };
    assert!(opts.resolve_server_address().await.is_ok());

    // A scheme prefix on the host is tolerated.
    let opts = HyperServerOptions {
        host: "http://8.6.7.5".to_string(),
        port: 309,
        ..Default::default()
    };
    assert!(opts.resolve_server_address().await.is_ok());

    // A non-resolvable host name is rejected.
    let opts = HyperServerOptions {
        host: "invalid-host".to_string(),
        port: 309,
        ..Default::default()
    };
    assert!(opts.resolve_server_address().await.is_err());
}
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/routes.rs | crates/rust-mcp-sdk/src/hyper_servers/routes.rs | #[cfg(feature = "auth")]
pub mod auth_routes;
pub mod fallback_routes;
#[cfg(feature = "sse")]
pub mod messages_routes;
#[cfg(feature = "sse")]
pub mod sse_routes;
pub mod streamable_http_routes;
use super::HyperServerOptions;
use crate::mcp_http::McpAppState;
use crate::mcp_http::McpHttpHandler;
use axum::{Extension, Router};
use std::sync::Arc;
/// Constructs the Axum router with all application routes
///
/// Combines routes for streamable HTTP, (optionally) OAuth and Server-Sent
/// Events, and the 404 fallback, attaching the shared application state and
/// the request handler extension to the router.
///
/// # Arguments
/// * `state` - Shared application state wrapped in an Arc
/// * `server_options` - Reference to the HyperServer configuration options
/// * `http_handler` - Transport-agnostic MCP request handler shared by all routes
///
/// # Returns
/// * `Router` - An Axum router configured with all application routes and state
///
pub fn app_routes(
    state: Arc<McpAppState>,
    server_options: &HyperServerOptions,
    http_handler: McpHttpHandler,
) -> Router {
    let http_handler = Arc::new(http_handler);
    // Build up the router feature-by-feature; the redundant outer block that
    // previously shadowed `router` has been removed.
    let mut router = Router::new();
    #[cfg(feature = "auth")]
    {
        router = router.merge(auth_routes::routes(http_handler.clone()));
    }
    router = router.merge(streamable_http_routes::routes(
        server_options.streamable_http_endpoint(),
    ));
    #[cfg(feature = "sse")]
    {
        router = router
            .merge(sse_routes::routes(
                server_options.sse_endpoint(),
                server_options.sse_messages_endpoint(),
            ))
            .merge(messages_routes::routes(
                server_options.sse_messages_endpoint(),
            ));
    }
    router = router.merge(fallback_routes::routes());
    router.with_state(state).layer(Extension(http_handler))
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/routes/auth_routes.rs | crates/rust-mcp-sdk/src/hyper_servers/routes/auth_routes.rs | use crate::hyper_servers::error::TransportServerResult;
use crate::mcp_http::{McpAppState, McpHttpHandler};
use axum::routing::any;
use axum::Extension;
use axum::{extract::State, response::IntoResponse, Router};
use http::{HeaderMap, Method, Uri};
use std::sync::Arc;
/// Registers a catch-all-method route for every OAuth endpoint the handler
/// advertises; yields an empty router when none are configured.
// NOTE(review): the method name spelling "oauth_endppoints" comes from the
// McpHttpHandler API and must be kept as-is.
pub fn routes(mcp_handler: Arc<McpHttpHandler>) -> Router<Arc<McpAppState>> {
    let mut router = Router::new();
    for endpoint in mcp_handler.oauth_endppoints().unwrap_or_default() {
        router = router.route(endpoint, any(handle_auth_request));
    }
    router
}
#[cfg(feature = "auth")]
/// Bridges an incoming OAuth-related HTTP request (any method) into the
/// transport-agnostic handler and converts its response back to an axum one.
pub async fn handle_auth_request(
    method: Method,
    uri: Uri,
    headers: HeaderMap,
    State(state): State<Arc<McpAppState>>,
    Extension(http_handler): Extension<Arc<McpHttpHandler>>,
    payload: String,
) -> TransportServerResult<impl IntoResponse> {
    let request = McpHttpHandler::create_request(method, uri, headers, Some(payload.as_str()));
    let (parts, body) = http_handler
        .handle_auth_requests(request, state)
        .await?
        .into_parts();
    Ok(axum::response::Response::from_parts(
        parts,
        axum::body::Body::new(body),
    ))
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/routes/fallback_routes.rs | crates/rust-mcp-sdk/src/hyper_servers/routes/fallback_routes.rs | use crate::mcp_http::McpAppState;
use axum::{
http::{StatusCode, Uri},
response::IntoResponse,
Router,
};
use std::sync::Arc;
/// Builds the fallback router: any request that matches no other route is
/// answered by [`not_found`] with a 404.
pub fn routes() -> Router<Arc<McpAppState>> {
    Router::new().fallback(not_found)
}
/// 404 handler: echoes the unmatched URI back in a plain-text body.
pub async fn not_found(uri: Uri) -> impl IntoResponse {
    let body = format!("The requested uri does not exist:\r\nuri: {uri}");
    (StatusCode::NOT_FOUND, body)
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/routes/streamable_http_routes.rs | crates/rust-mcp-sdk/src/hyper_servers/routes/streamable_http_routes.rs | use crate::hyper_servers::error::TransportServerResult;
use crate::mcp_http::{McpAppState, McpHttpHandler};
use axum::routing::get;
use axum::Extension;
use axum::{
extract::{Query, State},
response::IntoResponse,
routing::{delete, post},
Router,
};
use http::{HeaderMap, Method, Uri};
use std::{collections::HashMap, sync::Arc};
/// Registers the streamable-HTTP endpoint for GET (SSE stream), POST
/// (JSON-RPC payload) and DELETE (session teardown).
///
/// Uses a single chained `MethodRouter` instead of three separate `.route`
/// calls on the same path, which is the idiomatic axum form and does not rely
/// on axum's same-path method-router merging.
pub fn routes(streamable_http_endpoint: &str) -> Router<Arc<McpAppState>> {
    Router::new().route(
        streamable_http_endpoint,
        get(handle_streamable_http_get)
            .post(handle_streamable_http_post)
            .delete(handle_streamable_http_delete),
    )
}
/// GET handler for the streamable-HTTP endpoint; carries no body and is
/// forwarded to the shared handler as-is.
pub async fn handle_streamable_http_get(
    headers: HeaderMap,
    uri: Uri,
    State(state): State<Arc<McpAppState>>,
    Extension(http_handler): Extension<Arc<McpHttpHandler>>,
) -> TransportServerResult<impl IntoResponse> {
    let request = McpHttpHandler::create_request(Method::GET, uri, headers, None);
    let (parts, body) = http_handler
        .handle_streamable_http(request, state)
        .await?
        .into_parts();
    Ok(axum::response::Response::from_parts(
        parts,
        axum::body::Body::new(body),
    ))
}
/// POST handler for the streamable-HTTP endpoint; the request body carries the
/// JSON-RPC payload. The query-string extractor is unused but kept for
/// signature compatibility.
pub async fn handle_streamable_http_post(
    headers: HeaderMap,
    uri: Uri,
    State(state): State<Arc<McpAppState>>,
    Extension(http_handler): Extension<Arc<McpHttpHandler>>,
    Query(_params): Query<HashMap<String, String>>,
    payload: String,
) -> TransportServerResult<impl IntoResponse> {
    let request =
        McpHttpHandler::create_request(Method::POST, uri, headers, Some(payload.as_str()));
    let (parts, body) = http_handler
        .handle_streamable_http(request, state)
        .await?
        .into_parts();
    Ok(axum::response::Response::from_parts(
        parts,
        axum::body::Body::new(body),
    ))
}
/// DELETE handler for the streamable-HTTP endpoint (session termination);
/// carries no body.
pub async fn handle_streamable_http_delete(
    headers: HeaderMap,
    uri: Uri,
    State(state): State<Arc<McpAppState>>,
    Extension(http_handler): Extension<Arc<McpHttpHandler>>,
) -> TransportServerResult<impl IntoResponse> {
    let request = McpHttpHandler::create_request(Method::DELETE, uri, headers, None);
    let (parts, body) = http_handler
        .handle_streamable_http(request, state)
        .await?
        .into_parts();
    Ok(axum::response::Response::from_parts(
        parts,
        axum::body::Body::new(body),
    ))
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/routes/messages_routes.rs | crates/rust-mcp-sdk/src/hyper_servers/routes/messages_routes.rs | use crate::{
hyper_servers::error::TransportServerResult,
mcp_http::{McpAppState, McpHttpHandler},
utils::remove_query_and_hash,
};
use axum::{extract::State, response::IntoResponse, routing::post, Extension, Router};
use http::{HeaderMap, Method, Uri};
use std::sync::Arc;
/// Registers the POST route SSE clients use to deliver messages. Any query
/// string or fragment on the configured endpoint is stripped before the path
/// is registered with axum.
pub fn routes(sse_message_endpoint: &str) -> Router<Arc<McpAppState>> {
    let path = remove_query_and_hash(sse_message_endpoint);
    Router::new().route(path.as_str(), post(handle_messages))
}
/// Handles a message POSTed by an SSE client and forwards it to the shared
/// `McpHttpHandler`, translating the generic response back into an axum one.
pub async fn handle_messages(
    uri: Uri,
    headers: HeaderMap,
    State(state): State<Arc<McpAppState>>,
    Extension(http_handler): Extension<Arc<McpHttpHandler>>,
    message: String,
) -> TransportServerResult<impl IntoResponse> {
    let request = McpHttpHandler::create_request(Method::POST, uri, headers, Some(&message));
    // `state` is not used again in this handler, so move it instead of the
    // previous redundant Arc clone.
    let generic_response = http_handler.handle_sse_message(request, state).await?;
    let (parts, body) = generic_response.into_parts();
    let resp = axum::response::Response::from_parts(parts, axum::body::Body::new(body));
    Ok(resp)
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/hyper_servers/routes/sse_routes.rs | crates/rust-mcp-sdk/src/hyper_servers/routes/sse_routes.rs | use crate::hyper_servers::error::TransportServerResult;
use crate::mcp_http::{McpAppState, McpHttpHandler};
use axum::{extract::State, response::IntoResponse, routing::get, Extension, Router};
use http::{HeaderMap, Method, Uri};
use std::sync::Arc;
/// Newtype carrying the endpoint path that SSE clients should POST their
/// messages to; injected into [`handle_sse`] via an axum `Extension` layer.
#[derive(Clone)]
pub struct SseMessageEndpoint(pub String);
/// Configures the SSE routes for the application
///
/// Sets up the Axum router with a single GET route for the specified SSE
/// endpoint. The messages endpoint is attached to the handler via an axum
/// `Extension` so it can be advertised to connecting clients.
///
/// # Arguments
/// * `sse_endpoint` - The path for the SSE endpoint
/// * `sse_message_endpoint` - The path SSE clients POST their messages to
///
/// # Returns
/// * `Router<Arc<McpAppState>>` - An Axum router configured with the SSE route
pub fn routes(sse_endpoint: &str, sse_message_endpoint: &str) -> Router<Arc<McpAppState>> {
    let sse_message_endpoint = SseMessageEndpoint(sse_message_endpoint.to_string());
    Router::new().route(
        sse_endpoint,
        get(handle_sse).layer(Extension(sse_message_endpoint)),
    )
}
/// Handles Server-Sent Events (SSE) connections
///
/// Bridges the incoming SSE handshake to the shared `McpHttpHandler`, which
/// establishes the stream; the resulting generic response is converted back
/// into an axum response.
///
/// # Arguments
/// * `headers` / `uri` - Incoming request metadata
/// * `sse_message_endpoint` - Endpoint SSE clients should POST messages to
/// * `http_handler` - Shared transport-agnostic request handler
/// * `State(state)` - Extracted application state
///
/// # Returns
/// * `TransportServerResult<impl IntoResponse>` - The SSE response stream or an error
pub async fn handle_sse(
    headers: HeaderMap,
    uri: Uri,
    Extension(sse_message_endpoint): Extension<SseMessageEndpoint>,
    Extension(http_handler): Extension<Arc<McpHttpHandler>>,
    State(state): State<Arc<McpAppState>>,
) -> TransportServerResult<impl IntoResponse> {
    let SseMessageEndpoint(sse_message_endpoint) = sse_message_endpoint;
    let request = McpHttpHandler::create_request(Method::GET, uri, headers, None);
    // `state` is not used again here, so move it instead of the previous
    // redundant Arc clone.
    let generic_response = http_handler
        .handle_sse_connection(request, state, Some(&sse_message_endpoint))
        .await?;
    let (parts, body) = generic_response.into_parts();
    let resp = axum::response::Response::from_parts(parts, axum::body::Body::new(body));
    Ok(resp)
}
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | false |
rust-mcp-stack/rust-mcp-sdk | https://github.com/rust-mcp-stack/rust-mcp-sdk/blob/c25994d3d800242c1413b7401432f2a5bef3c23f/crates/rust-mcp-sdk/src/task_store/in_memory_task_store.rs | crates/rust-mcp-sdk/src/task_store/in_memory_task_store.rs | use super::{CreateTaskOptions, TaskStore};
use crate::error::SdkResult;
use crate::task_store::TaskStatusSignal;
use crate::utils::{current_utc_time, iso8601_time};
use crate::{id_generator::FastIdGenerator, IdGenerator};
use async_trait::async_trait;
use futures::{future::BoxFuture, stream, Stream};
use rust_mcp_schema::{
ListTasksResult, RequestId, RpcError, Task, TaskStatus, TaskStatusNotificationParams,
};
use rust_mcp_transport::{SessionId, TaskId};
use std::cmp::Reverse;
use std::collections::{BTreeMap, BinaryHeap, HashMap};
use std::fmt::{Debug, Display};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use tokio::sync::oneshot::Receiver;
use tokio::sync::{oneshot, RwLock};
use tokio::task::JoinHandle;
/// Parameters returned by a task status polling callback.
///
/// Contains the latest known status of a task and its recommended poll interval
/// (in milliseconds). The poll interval can be adjusted dynamically by the remote
/// side to implement adaptive polling (e.g., longer intervals when idle).
pub type TaskStatusUpdate = (TaskStatus, Option<i64>);
/// A callback used to poll the status of a task from the task receiver side.
/// This will be invoked by the entity initiating the task, which could be either the client or the server.
pub type TaskStatusPoller = Box<
    dyn Fn(TaskId, Option<SessionId>) -> BoxFuture<'static, SdkResult<TaskStatusUpdate>>
        + Send
        + Sync
        + 'static,
>;
/// Represents a single scheduled polling operation for a task.
/// Tuple fields, in order:
/// - `Instant`: when this task should be polled next. Entries are pushed onto
///   the heap wrapped in `Reverse(..)` at the call sites, so the earliest
///   (smallest) `Instant` is popped first (min-heap behavior).
/// - `TaskId`: identifier of the task to poll.
/// - `Option<SessionId>`: session context; `None` means the task is global
///   (not bound to any session).
type ScheduledPoll = (Instant, TaskId, Option<SessionId>);
// Default number of tasks per `list_tasks` page.
const DEFAULT_PAGE_SIZE: usize = 50;
// Default polling interval in milliseconds (used with `Duration::from_millis`).
const DEFAULT_POLL_INTERVAL: i64 = 1250;
/// In-memory, session-aware task store: tasks are grouped by an optional
/// session id (`None` = global) and status changes are fanned out over a
/// broadcast channel.
pub struct InMemoryTaskStore<Req, Res>
where
    Req: Clone + Send + Sync + 'static,
    Res: Clone + Send + Sync + 'static,
{
    // Generates unique task ids (prefixed with "tsk" — see `new`).
    id_gen: Arc<FastIdGenerator>,
    // Inner state protected by RwLock for concurrent access
    inner: Arc<tokio::sync::RwLock<InMemoryTaskStoreInner<Req, Res>>>,
    // Maximum number of tasks returned per `list_tasks` page.
    page_size: usize,
    // Fan-out channel for (status notification, session id) events.
    broadcast: tokio::sync::broadcast::Sender<(TaskStatusNotificationParams, Option<String>)>,
    // Handle of the background polling loop once `start_task_polling` ran.
    polling_task_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Internal record for a single stored task: the public `Task` metadata plus
/// the originating request, the eventual result, and an optional oneshot
/// sender used to wake a `wait_for_task_result` caller.
#[derive(Debug)]
struct TaskEntry<Req, Res> {
    task: Task,
    #[allow(unused)]
    request: Req, // original request that created the task
    result: Option<Res>, // stored only after store_task_result
    #[allow(unused)]
    expires_at: Option<i64>, // Unix millis, for reference (optional now)
    // Extra metadata supplied at creation; echoed in status notifications.
    meta: Option<serde_json::Map<String, serde_json::Value>>,
    // Set by `subscribe_to_task`; fired once the task reaches a terminal state.
    result_tx: Option<tokio::sync::oneshot::Sender<(TaskStatus, Option<Res>)>>,
}
impl<Req, Res> Display for TaskEntry<Req, Res> {
    /// Multi-line, human-readable dump of the task's public fields; used by
    /// the store's debug logging.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let task = &self.task;
        writeln!(f, "task_id: {}", task.task_id)?;
        writeln!(f, "created_at: {}", task.created_at)?;
        writeln!(f, "status: {}", task.status)?;
        writeln!(f, "last_updated_at: {}", task.last_updated_at)?;
        // Status message line is omitted entirely when absent.
        if let Some(message) = task.status_message.as_ref() {
            writeln!(f, "status_message: {}", message)?;
        }
        // TTL always gets a line, printing "null" when unset.
        match task.ttl.as_ref() {
            Some(ttl) => writeln!(f, "ttl: {}", ttl)?,
            None => writeln!(f, "ttl: null")?,
        }
        Ok(())
    }
}
/// Lock-protected state of [`InMemoryTaskStore`]; all maps are keyed by the
/// optional session id (`None` = global scope).
struct InMemoryTaskStoreInner<Req, Res> {
    // Map: session_id (None for global) => task_id => TaskEntry
    tasks: HashMap<Option<String>, BTreeMap<String, TaskEntry<Req, Res>>>,
    // For simple reverse-chronological pagination (newest first)
    // session_id => Vec<task_id> sorted by created_at descending
    ordered_task_ids: HashMap<Option<String>, Vec<String>>,
    // A min-heap for scheduling task-status polling when this task store is used
    // to hold requester tasks while waiting for the other party to complete them.
    pub poll_schedule: Option<BinaryHeap<Reverse<ScheduledPoll>>>, // min-heap by (next_poll_time, poll_interval,...)
}
impl<Req, Res> InMemoryTaskStoreInner<Req, Res> {
    /// Re-inserts each (task, session, interval) into the poll schedule with a
    /// fresh deadline of `now + interval` milliseconds. Drains `tasks`; no-op
    /// when the store has no poll schedule.
    pub(crate) fn re_schedule(&mut self, tasks: &mut Vec<(TaskId, Option<SessionId>, i64)>) {
        let Some(poll_schedule) = self.poll_schedule.as_mut() else {
            return;
        };
        let now = Instant::now();
        // `drain(..)` is the idiomatic full-range drain (was `drain(0..)`).
        for (task_id, session_id, poll_interval) in tasks.drain(..) {
            // `checked_add` only fails on Instant overflow; fall back lazily to
            // "now" so the task is polled immediately rather than lost.
            let next_poll = now
                .checked_add(Duration::from_millis(poll_interval as u64))
                .unwrap_or_else(Instant::now);
            poll_schedule.push(Reverse((next_poll, task_id, session_id)));
        }
    }

    /// Looks up a task entry by id within the given session scope
    /// (`None` = global).
    pub(crate) fn get_task(
        &self,
        task_id: &str,
        session_id: &Option<String>,
    ) -> Option<&TaskEntry<Req, Res>> {
        self.tasks
            .get(session_id)
            .and_then(|session_map| session_map.get(task_id))
    }

    /// Removes and returns a task entry by id within the given session scope.
    pub(crate) fn remove_task(
        &mut self,
        task_id: &str,
        session_id: &Option<String>,
    ) -> Option<TaskEntry<Req, Res>> {
        self.tasks
            .get_mut(session_id)
            .and_then(|session_map| session_map.remove(task_id))
    }

    /// Time until the earliest scheduled poll is due (`duration_since`
    /// saturates to zero for deadlines already in the past). Falls back to
    /// `DEFAULT_POLL_INTERVAL` when the schedule is absent or empty.
    pub(crate) fn next_sleep_duration(&self) -> Duration {
        let now = Instant::now();
        if let Some(poll_schedule) = self.poll_schedule.as_ref() {
            if let Some(Reverse(entry)) = poll_schedule.peek() {
                return entry.0.duration_since(now);
            }
        };
        Duration::from_millis(DEFAULT_POLL_INTERVAL as u64)
    }

    /// Pops and returns every task whose scheduled poll time has arrived.
    /// Because the heap is a min-heap on the deadline, iteration can stop at
    /// the first entry that is still in the future.
    pub(crate) fn tasks_to_poll(&mut self) -> Vec<(TaskId, Option<SessionId>)> {
        let now = Instant::now();
        let Some(poll_schedule) = self.poll_schedule.as_mut() else {
            return vec![];
        };
        let mut task_ids = Vec::new();
        while let Some(Reverse(entry)) = poll_schedule.peek() {
            let (next_poll, task_id, session_id) = &entry;
            if next_poll <= &now {
                task_ids.push((task_id.clone(), session_id.clone()));
                poll_schedule.pop();
            } else {
                break; // everything remaining is scheduled later
            }
        }
        task_ids
    }

    /// Registers a oneshot channel on the task; the sender is fired when the
    /// task reaches a terminal state. Replaces any previously registered
    /// waiter (whose receiver will then resolve with a channel-closed error).
    /// Returns `None` when the task does not exist.
    async fn subscribe_to_task(
        &mut self,
        task_id: &str,
        session_id: &Option<String>,
    ) -> Option<Receiver<(TaskStatus, Option<Res>)>> {
        let entry = self
            .tasks
            .get_mut(session_id)
            .and_then(|session_map| session_map.get_mut(task_id))?;
        let (tx_response, rx_response) = oneshot::channel::<(TaskStatus, Option<Res>)>();
        entry.result_tx = Some(tx_response);
        Some(rx_response)
    }
}
impl<Req, Res> InMemoryTaskStore<Req, Res>
where
    Req: Debug + Clone + Send + Sync + serde::Deserialize<'static> + serde::Serialize + 'static,
    Res: Debug + Clone + Send + Sync + serde::Deserialize<'static> + serde::Serialize + 'static,
{
    /// Creates an empty task store.
    ///
    /// `page_size` controls `list_tasks` pagination; `None` falls back to
    /// `DEFAULT_PAGE_SIZE`. Task ids are generated with a "tsk" prefix and the
    /// 64-slot broadcast channel backs status-change notifications (slow
    /// subscribers may observe lag).
    pub fn new(page_size: Option<usize>) -> Self {
        Self {
            inner: Arc::new(RwLock::new(InMemoryTaskStoreInner {
                tasks: HashMap::new(),
                ordered_task_ids: HashMap::new(),
                poll_schedule: Some(BinaryHeap::new()),
            })),
            broadcast: tokio::sync::broadcast::channel(64).0,
            page_size: page_size.unwrap_or(DEFAULT_PAGE_SIZE),
            id_gen: Arc::new(FastIdGenerator::new(Some("tsk"))),
            polling_task_handle: Mutex::new(None),
        }
    }
}
impl<Req, Res> InMemoryTaskStore<Req, Res>
where
    Req: Debug + Clone + Send + Sync + serde::Deserialize<'static> + serde::Serialize + 'static,
    Res: Debug + Clone + Send + Sync + serde::Deserialize<'static> + serde::Serialize + 'static,
{
    /// Builds a `TaskStatusNotificationParams` snapshot from the entry's
    /// current state and publishes it to all status-change subscribers.
    async fn notify_status_change(
        &self,
        task_entry: &TaskEntry<Req, Res>,
        session_id: Option<&String>,
    ) {
        let task = &task_entry.task;
        let params = TaskStatusNotificationParams {
            created_at: task.created_at.to_owned(),
            last_updated_at: task.last_updated_at.to_owned(),
            meta: task_entry.meta.clone(),
            poll_interval: task.poll_interval,
            status: task.status,
            status_message: task.status_message.clone(),
            task_id: task.task_id.clone(),
            ttl: task.ttl,
        };
        self.publish_status_change(params, session_id).await;
    }
}
#[async_trait]
impl<Req, Res> TaskStatusSignal for InMemoryTaskStore<Req, Res>
where
    Req: Clone + Debug + Send + Sync + 'static + serde::Deserialize<'static> + serde::Serialize,
    Res: Clone + Debug + Send + Sync + 'static + serde::Deserialize<'static> + serde::Serialize,
{
    /// Publishes the event on the broadcast channel. A send error only means
    /// there are currently no subscribers, so it is deliberately ignored.
    async fn publish_status_change(
        &self,
        event: TaskStatusNotificationParams,
        session_id: Option<&String>,
    ) {
        let _ = self.broadcast.send((event, session_id.cloned()));
    }

    /// Returns a stream of `(notification, session_id)` events backed by a new
    /// broadcast receiver. Lagged messages (subscriber slower than the channel
    /// capacity) are skipped with a warning; the stream ends once the sender
    /// side is dropped.
    fn subscribe(
        &self,
    ) -> Option<
        Pin<
            Box<dyn Stream<Item = (TaskStatusNotificationParams, Option<String>)> + Send + 'static>,
        >,
    > {
        let rx = self.broadcast.subscribe();
        let stream = stream::unfold(rx, |mut rx| async move {
            loop {
                match rx.recv().await {
                    Ok(item) => return Some((item, rx)),
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => return None,
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(skipped)) => {
                        tracing::warn!("Broadcast lagged: skipped {} messages", skipped);
                        continue;
                    }
                }
            }
        });
        Some(Box::pin(stream))
    }
}
#[async_trait]
impl<Req, Res> TaskStore<Req, Res> for InMemoryTaskStore<Req, Res>
where
Req: Clone + Debug + Send + Sync + 'static + serde::Deserialize<'static> + serde::Serialize,
Res: Clone + Debug + Send + Sync + 'static + serde::Deserialize<'static> + serde::Serialize,
{
/// Creates a new task in `Working` state, schedules it for status polling,
/// indexes it for pagination, and (when a TTL is given) spawns a one-shot
/// cleanup task that removes it after the TTL elapses.
async fn create_task(
    &self,
    task_params: CreateTaskOptions,
    _request_id: RequestId,
    request: Req,
    session_id: Option<String>,
) -> Task {
    let mut inner = self.inner.write().await;
    let task_id: String = self.id_gen.generate();
    let created_at = iso8601_time(current_utc_time(None));
    let task = Task {
        task_id: task_id.clone(),
        created_at: created_at.clone(),
        status: TaskStatus::Working,
        poll_interval: task_params.poll_interval,
        ttl: task_params.ttl,
        status_message: None,
        last_updated_at: created_at.clone(),
    };
    let entry = TaskEntry {
        task: task.clone(),
        request,
        result: None,
        expires_at: task_params
            .ttl
            .map(|ttl| current_utc_time(Some(ttl)).unix_timestamp()),
        meta: task_params.meta,
        result_tx: None,
    };
    // schedule the task for polling
    if let Some(schedule) = inner.poll_schedule.as_mut() {
        let poll_interval: i64 = task_params.poll_interval.unwrap_or(DEFAULT_POLL_INTERVAL);
        let next_poll = Instant::now()
            .checked_add(Duration::from_millis(poll_interval as u64))
            .unwrap_or(Instant::now());
        schedule.push(Reverse((next_poll, task_id.clone(), session_id.clone())));
    }
    tracing::debug!(
        "New task created: {entry} \n{}",
        session_id
            .as_ref()
            .map_or(String::new(), |s| format!("Session: {s}"))
    );
    // Insert into tasks map
    let session_tasks = inner
        .tasks
        .entry(session_id.clone())
        .or_insert_with(BTreeMap::new);
    session_tasks.insert(task_id.clone(), entry);
    // Insert into ordered list (newest first)
    let ordered = inner
        .ordered_task_ids
        .entry(session_id.clone())
        .or_insert_with(Vec::new);
    ordered.insert(0, task_id.clone()); // newest at front
    // Handle TTL: spawn a one-time cleanup task if ttl is set
    if let Some(ttl_duration) = task_params.ttl {
        let inner_clone = self.inner.clone();
        let session_id_clone = session_id.clone();
        let task_id_clone = task_id.clone();
        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(ttl_duration as u64)).await;
            let mut write_guard = inner_clone.write().await;
            // Remove from tasks map
            if let Some(session_map) = write_guard.tasks.get_mut(&session_id_clone) {
                session_map.remove(&task_id_clone);
            }
            // Remove from ordered list
            if let Some(ordered_ids) = write_guard.ordered_task_ids.get_mut(&session_id_clone) {
                if let Some(pos) = ordered_ids.iter().position(|id| id == &task_id_clone) {
                    ordered_ids.remove(pos);
                }
            }
            // Optional: clean up empty session entries
            write_guard.tasks.retain(|_, map| !map.is_empty());
            write_guard
                .ordered_task_ids
                .retain(|_, vec| !vec.is_empty());
            tracing::debug!("Task {} expired and removed due to TTL", task_id_clone);
        });
    }
    task
}
/// Spawns the background loop that polls stored tasks' status through
/// `get_task_callback`, resolving waiters and removing tasks that reach a
/// terminal state, and rescheduling the rest.
///
/// Returns an error if polling was already started or the handle mutex is
/// poisoned.
///
/// Fix: the sleep duration is now computed inside a short-lived read-lock
/// scope; previously the read guard was held across `tokio::time::sleep`,
/// blocking every writer (e.g. `create_task`) for the whole sleep interval.
fn start_task_polling(&self, get_task_callback: TaskStatusPoller) -> SdkResult<()> {
    match self.polling_task_handle.lock().map(|v| v.is_some()) {
        Ok(has_value) if has_value => {
            return Err(RpcError::internal_error()
                .with_message("Task polling is already running.")
                .into())
        }
        Err(err) => {
            return Err(RpcError::internal_error()
                .with_message(err.to_string())
                .into())
        }
        _ => {}
    }
    let inner = self.inner.clone();
    let handle = tokio::spawn(async move {
        loop {
            let mut to_reschedule: Vec<(TaskId, Option<SessionId>, i64)> = Vec::new();
            // Pop due tasks under the lock, then poll without holding it.
            let tasks_to_poll = {
                let mut guard = inner.write().await;
                guard.tasks_to_poll()
            };
            for (task_id, session_id) in tasks_to_poll {
                // TODO: avoid cloning
                match get_task_callback(task_id.clone(), session_id.clone()).await {
                    Ok((task_status, poll_interval)) => {
                        if task_status.is_terminal() {
                            // remove the task and resolve awaiting task if in terminal state
                            let mut guard = inner.write().await;
                            let entry = guard.remove_task(&task_id, &session_id);
                            if let Some(task_entry) = entry {
                                if let Some(result_tx) = task_entry.result_tx {
                                    // if fails, then listener is gone, no need to retry
                                    let _ = result_tx.send((task_status, task_entry.result));
                                }
                            }
                        } else {
                            to_reschedule.push((
                                task_id.clone(),
                                session_id,
                                poll_interval.unwrap_or(DEFAULT_POLL_INTERVAL),
                            ));
                        }
                    }
                    Err(_err) => {
                        let guard = inner.read().await;
                        // re-schedule if task still exists and not expired
                        if let Some(get_task) = guard.get_task(&task_id, &session_id) {
                            to_reschedule.push((
                                task_id,
                                session_id,
                                get_task.task.poll_interval.unwrap_or(DEFAULT_POLL_INTERVAL),
                            ));
                        }
                    }
                }
            }
            if !to_reschedule.is_empty() {
                let mut guard = inner.write().await;
                guard.re_schedule(&mut to_reschedule)
            }
            // Compute the sleep time in a scoped read lock so the lock is NOT
            // held while sleeping.
            let sleep_duration = {
                let guard = inner.read().await;
                guard.next_sleep_duration()
            };
            tokio::time::sleep(sleep_duration).await;
        }
    });
    let mut lock = match self.polling_task_handle.lock() {
        Ok(value) => value,
        Err(err) => {
            return Err(RpcError::internal_error()
                .with_message(err.to_string())
                .into())
        }
    };
    *lock = Some(handle);
    Ok(())
}
/// Blocks until the task reaches a terminal state and its result (if any) is
/// delivered through the per-task oneshot channel registered via
/// `subscribe_to_task`.
///
/// Errors when the task does not exist or the channel sender is dropped
/// without firing (e.g. the waiter was replaced or the store shut down).
async fn wait_for_task_result(
    &self,
    task_id: &str,
    session_id: Option<String>,
) -> SdkResult<(TaskStatus, Option<Res>)> {
    // Subscribe inside a scoped write lock so the lock is released before
    // awaiting the receiver.
    let rx_option = {
        let mut guard = self.inner.write().await;
        guard.subscribe_to_task(task_id, &session_id).await
    };
    let Some(rx) = rx_option else {
        // Fixed grammar of the error message ("does not exists" -> "does not exist").
        return Err(RpcError::internal_error()
            .with_message("task does not exist!")
            .into());
    };
    match rx.await {
        Ok(result) => Ok(result),
        Err(err) => Err(RpcError::internal_error()
            .with_message(err.to_string())
            .into()),
    }
}
/// Returns a clone of the task's public metadata, or `None` when the task is
/// unknown in the given session scope.
async fn get_task(&self, task_id: &str, session_id: Option<String>) -> Option<Task> {
    let guard = self.inner.read().await;
    let session_map = guard.tasks.get(&session_id)?;
    let entry = session_map.get(task_id)?;
    Some(entry.task.clone())
}
/// Stores the final result for a task, updates its status/timestamp, clears
/// any stale status message, and notifies subscribers if the status changed.
/// Silently does nothing when the task is unknown.
async fn store_task_result(
    &self,
    task_id: &str,
    status: TaskStatus,
    result: Res,
    session_id: Option<&String>,
) -> () {
    let mut inner = self.inner.write().await;
    // `.cloned()` replaces the previous `map(|v| v.to_string())` round-trip.
    if let Some(session_map) = inner.tasks.get_mut(&session_id.cloned()) {
        if let Some(entry) = session_map.get_mut(task_id) {
            let status_has_changed = entry.task.status != status;
            entry.task.status = status;
            // `result` is owned and not used again, so move it (the previous
            // `.clone()` was redundant).
            entry.result = Some(result);
            entry.task.last_updated_at = iso8601_time(current_utc_time(None));
            entry.task.status_message = None;
            tracing::debug!("Task result stored: {entry}");
            if status_has_changed {
                self.notify_status_change(entry, session_id).await;
            }
        }
    }
}
/// Returns a clone of the stored result, or `None` when the task is unknown
/// or no result has been stored yet.
async fn get_task_result(&self, task_id: &str, session_id: Option<String>) -> Option<Res> {
    let guard = self.inner.read().await;
    let entry = guard.tasks.get(&session_id)?.get(task_id)?;
    entry.result.clone()
}
/// Updates a task's status/message and touches its `last_updated_at`
/// timestamp, notifying subscribers when the status actually changed.
/// Silently does nothing when the task is unknown.
///
/// Fix: the notification is now published AFTER the entry is mutated, so
/// subscribers receive the NEW status. Previously `notify_status_change` ran
/// before the assignment and broadcast the stale status/message — also
/// inconsistent with `store_task_result`, which notifies after updating.
async fn update_task_status(
    &self,
    task_id: &str,
    status: TaskStatus,
    status_message: Option<String>,
    session_id: Option<String>,
) -> () {
    let mut inner = self.inner.write().await;
    if let Some(session_map) = inner.tasks.get_mut(&session_id) {
        if let Some(entry) = session_map.get_mut(task_id) {
            // Remember whether a notification is due before overwriting.
            let status_has_changed = entry.task.status != status;
            entry.task.status = status;
            entry.task.status_message = status_message;
            entry.task.last_updated_at = iso8601_time(current_utc_time(None));
            tracing::debug!("Task status updated: {entry}");
            if status_has_changed {
                self.notify_status_change(entry, session_id.as_ref()).await;
            }
        }
    }
}
/// Lists tasks for the given session scope newest-first, paginated by
/// `self.page_size`.
///
/// `cursor` is the task id at which the page starts (as returned in
/// `next_cursor` of the previous page); an unknown or absent cursor starts
/// from the newest task.
async fn list_tasks(
    &self,
    cursor: Option<String>,
    session_id: Option<String>,
) -> ListTasksResult {
    let inner = self.inner.read().await;
    let ordered_ids = match inner.ordered_task_ids.get(&session_id) {
        Some(ids) => ids,
        None => {
            // No tasks were ever created for this scope.
            return ListTasksResult {
                tasks: vec![],
                next_cursor: None,
                meta: None,
            };
        }
    };
    let start_idx = cursor
        .as_ref()
        .and_then(|c| ordered_ids.iter().position(|id| id == c))
        .unwrap_or(0);
    let end_idx = (start_idx + self.page_size).min(ordered_ids.len());
    let page_ids = &ordered_ids[start_idx..end_idx];
    // Ids whose entry has since expired are silently skipped.
    let tasks: Vec<Task> = page_ids
        .iter()
        .filter_map(|id| {
            inner
                .tasks
                .get(&session_id)
                .and_then(|map| map.get(id))
                .map(|entry| entry.task.clone())
        })
        .collect();
    // The first id after this page becomes the next cursor, if any remain.
    let next_cursor = if end_idx < ordered_ids.len() {
        ordered_ids.get(end_idx).cloned()
    } else {
        None
    };
    ListTasksResult {
        tasks,
        next_cursor,
        meta: None,
    }
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
use tokio::time::{advance, pause, resume};
// Test helper: task-creation options with a fixed 1s poll interval and the
// given TTL (milliseconds).
fn create_options(ttl_ms: Option<i64>) -> CreateTaskOptions {
    CreateTaskOptions {
        ttl: ttl_ms,
        poll_interval: Some(1000),
        meta: None,
    }
}
// Test helper: a minimal JSON payload standing in for an MCP request.
fn dummy_request() -> serde_json::Value {
    serde_json::json!({
        "method": "tools/call",
        "params": { "name": "test-tool" }
    })
}
// New tasks start in Working state with the supplied TTL/poll interval echoed
// back and both timestamps populated.
#[tokio::test]
async fn create_task_creates_with_working_status() {
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let task = store
        .create_task(
            create_options(Some(60_000)),
            123.into(),
            dummy_request(),
            None,
        )
        .await;
    assert!(task.task_id.len() > 0);
    assert_eq!(task.status, TaskStatus::Working);
    assert_eq!(task.ttl, Some(60_000));
    assert!(task.poll_interval.is_some());
    assert!(task.created_at.len() > 0);
    assert!(task.last_updated_at.len() > 0);
}
// Omitting the TTL leaves the task's ttl field unset.
#[tokio::test]
async fn create_task_without_ttl() {
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let task = store
        .create_task(create_options(None), 456.into(), dummy_request(), None)
        .await;
    assert_eq!(task.ttl, None);
}
// Consecutively created tasks must receive distinct ids.
#[tokio::test]
async fn task_ids_are_unique() {
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let task1 = store
        .create_task(create_options(None), 789.into(), dummy_request(), None)
        .await;
    let task2 = store
        .create_task(create_options(None), 790.into(), dummy_request(), None)
        .await;
    assert_ne!(task1.task_id, task2.task_id);
}
// Looking up an unknown task id yields None rather than an error.
#[tokio::test]
async fn get_task_returns_none_for_missing() {
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let task = store.get_task("non-existent", None).await;
    assert!(task.is_none());
}
// A status update is reflected by a subsequent get_task.
#[tokio::test]
async fn update_and_get_task_status() {
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let created = store
        .create_task(create_options(None), 111.into(), dummy_request(), None)
        .await;
    store
        .update_task_status(&created.task_id, TaskStatus::InputRequired, None, None)
        .await;
    let task = store.get_task(&created.task_id, None).await.unwrap();
    assert_eq!(task.status, TaskStatus::InputRequired);
}
// Storing a result moves the task to Completed and makes the payload
// retrievable via get_task_result.
#[tokio::test]
async fn store_and_retrieve_task_result() {
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let created = store
        .create_task(
            create_options(Some(60_000)),
            333.into(),
            dummy_request(),
            None,
        )
        .await;
    let result = serde_json::json!({
        "content": [{ "type": "text", "text": "Success!" }]
    });
    store
        .store_task_result(
            &created.task_id,
            TaskStatus::Completed,
            result.clone(),
            None,
        )
        .await;
    let task = store.get_task(&created.task_id, None).await.unwrap();
    assert_eq!(task.status, TaskStatus::Completed);
    let stored = store.get_task_result(&created.task_id, None).await;
    assert_eq!(stored, Some(result));
}
// With tokio's paused clock, a task with a 1s TTL is gone after time is
// advanced past the TTL (the test advances well beyond it).
#[tokio::test]
async fn ttl_expires_task_precisely() {
    pause(); // Make time controlled
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let created = store
        .create_task(
            create_options(Some(1000)),
            666.into(),
            dummy_request(),
            None,
        )
        .await;
    let task = store.get_task(&created.task_id, None).await;
    assert!(task.is_some());
    advance_time_ms(10001).await;
    let task = store.get_task(&created.task_id, None).await;
    assert!(task.is_none());
    resume();
}
#[tokio::test]
async fn tasks_without_ttl_do_not_expire() {
    // With ttl == None a task must survive an arbitrarily long simulated delay.
    pause();
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let task_id = store
        .create_task(create_options(None), 888.into(), dummy_request(), None)
        .await
        .task_id;
    advance_time_ms(10001).await;
    assert!(store.get_task(&task_id, None).await.is_some());
    resume();
}
/// Advance tokio's paused test clock by `ms` milliseconds.
///
/// The `yield_now` before and after the jump give other ready tasks a
/// chance to run around the time change — presumably so any background
/// expiry/cleanup work observes the new time (NOTE(review): confirm
/// whether the store uses a spawned cleanup task or lazy on-read expiry;
/// the surrounding tests pass either way).
async fn advance_time_ms(ms: u64) {
tokio::task::yield_now().await;
advance(Duration::from_millis(ms)).await;
tokio::task::yield_now().await;
}
#[tokio::test]
async fn completed_tasks_still_expire_after_ttl() {
    // Reaching a terminal state does not exempt a task from TTL cleanup.
    pause();
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let id = store
        .create_task(
            create_options(Some(1000)),
            999.into(),
            dummy_request(),
            None,
        )
        .await
        .task_id;
    store
        .store_task_result(&id, TaskStatus::Completed, serde_json::json!({}), None)
        .await;
    advance_time_ms(10001).await;
    assert!(store.get_task(&id, None).await.is_none());
    resume();
}
#[tokio::test]
async fn all_terminal_states_expire() {
    // Working, Completed, Failed and Cancelled tasks must all be reaped
    // once their TTL elapses — no state is exempt from cleanup.
    pause();
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);

    // Still Working — never transitioned.
    let working_id = store
        .create_task(create_options(Some(1000)), 1001.into(), dummy_request(), None)
        .await
        .task_id;

    // Completed via a stored result.
    let completed_id = store
        .create_task(create_options(Some(1000)), 1002.into(), dummy_request(), None)
        .await
        .task_id;
    store
        .store_task_result(&completed_id, TaskStatus::Completed, serde_json::json!({}), None)
        .await;

    // Failed via a stored error result.
    let failed_id = store
        .create_task(create_options(Some(1000)), 1003.into(), dummy_request(), None)
        .await
        .task_id;
    store
        .store_task_result(
            &failed_id,
            TaskStatus::Failed,
            serde_json::json!({ "is_error": true }),
            None,
        )
        .await;

    // Cancelled via a plain status update.
    let cancelled_id = store
        .create_task(create_options(Some(1000)), 1004.into(), dummy_request(), None)
        .await
        .task_id;
    store
        .update_task_status(&cancelled_id, TaskStatus::Cancelled, None, None)
        .await;

    advance_time_ms(10001).await;

    for id in [&working_id, &completed_id, &failed_id, &cancelled_id] {
        assert!(store.get_task(id, None).await.is_none());
    }
    resume();
}
#[tokio::test]
async fn list_tasks_pagination() {
    // With a page size of 3 and 7 tasks, listing yields pages of 3, 3 and 1,
    // with a cursor after every page except the last.
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(Some(3));
    for request_id in 0..7 {
        store
            .create_task(create_options(None), request_id.into(), dummy_request(), None)
            .await;
    }
    let first = store.list_tasks(None, None).await;
    assert_eq!(first.tasks.len(), 3);
    assert!(first.next_cursor.is_some());
    let second = store.list_tasks(first.next_cursor, None).await;
    assert_eq!(second.tasks.len(), 3);
    assert!(second.next_cursor.is_some());
    let third = store.list_tasks(second.next_cursor, None).await;
    assert_eq!(third.tasks.len(), 1);
    assert!(third.next_cursor.is_none());
}
#[tokio::test]
async fn list_tasks_empty() {
    // Listing an empty store returns no tasks and no continuation cursor.
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let page = store.list_tasks(None, None).await;
    assert!(page.tasks.is_empty());
    assert!(page.next_cursor.is_none());
}
#[tokio::test]
async fn pagination_respects_order_newest_first() {
    // list_tasks must return tasks in reverse creation order (newest first).
    let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
    let mut created_ids = Vec::new();
    for request_id in 1..=3 {
        let task = store
            .create_task(create_options(None), request_id.into(), dummy_request(), None)
            .await;
        created_ids.push(task.task_id);
    }
    let listing = store.list_tasks(None, None).await;
    let listed_ids: Vec<_> = listing.tasks.iter().map(|t| t.task_id.clone()).collect();
    // Last-created comes back first.
    assert_eq!(listed_ids[0], created_ids[2]);
    assert_eq!(listed_ids[1], created_ids[1]);
    assert_eq!(listed_ids[2], created_ids[0]);
}
}
#[cfg(test)]
mod polling_tests {
use super::*;
use rust_mcp_schema::RpcError;
use serde_json::Value;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;
use tokio::time::{advance, pause};
/// Minimal request payload for tests that do not care about the body:
/// an empty JSON object (identical to `serde_json::json!({})`).
fn dummy_request() -> serde_json::Value {
    serde_json::Value::Object(serde_json::Map::new())
}
#[tokio::test]
async fn new_with_polling_initializes_polling_schedule() {
pause();
let store = InMemoryTaskStore::<serde_json::Value, serde_json::Value>::new(None);
store
.start_task_polling(Box::new(|_task_id, _| {
Box::pin(async { Ok((TaskStatus::Working, Some(500))) })
}))
.unwrap();
let created = store
.create_task(
CreateTaskOptions {
ttl: None,
poll_interval: Some(500),
meta: None,
},
1.into(),
dummy_request(),
None,
)
| rust | MIT | c25994d3d800242c1413b7401432f2a5bef3c23f | 2026-01-04T20:25:10.242745Z | true |
// NOTE(review): the lines below are not Rust — they are dataset-viewer UI
// text accidentally appended to the file and would not compile; preserved
// here as a comment pending removal:
// Subsets and Splits
// No community queries yet
// The top public SQL queries from the community will appear here once available.