{"repo_name": "rust-sdk", "file_name": "/rust-sdk/crates/rmcp/src/transport/auth.rs", "inference_info": {"prefix_code": "use std::{\n collections::HashMap,\n sync::Arc,\n time::{Duration, Instant},\n};\n\nuse oauth2::{\n AuthUrl, AuthorizationCode, ClientId, ClientSecret, CsrfToken, EmptyExtraTokenFields,\n PkceCodeChallenge, PkceCodeVerifier, RedirectUrl, RefreshToken, Scope, StandardTokenResponse,\n TokenResponse, TokenUrl,\n basic::{BasicClient, BasicTokenType},\n};\nuse reqwest::{Client as HttpClient, IntoUrl, StatusCode, Url, header::AUTHORIZATION};\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tokio::sync::{Mutex, RwLock};\nuse tracing::{debug, error};\n\n/// sse client with oauth2 authorization\n#[derive(Clone)]\npub struct AuthClient {\n pub http_client: C,\n pub auth_manager: Arc>,\n}\n\nimpl std::fmt::Debug for AuthClient {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"AuthorizedClient\")\n .field(\"http_client\", &self.http_client)\n .field(\"auth_manager\", &\"...\")\n .finish()\n }\n}\n\nimpl AuthClient {\n /// create new authorized sse client\n pub fn new(http_client: C, auth_manager: AuthorizationManager) -> Self {\n Self {\n http_client,\n auth_manager: Arc::new(Mutex::new(auth_manager)),\n }\n }\n}\n\nimpl AuthClient {\n pub fn get_access_token(&self) -> impl Future> + Send {\n let auth_manager = self.auth_manager.clone();\n async move { auth_manager.lock().await.get_access_token().await }\n }\n}\n\n/// Auth error\n#[derive(Debug, Error)]\npub enum AuthError {\n #[error(\"OAuth authorization required\")]\n AuthorizationRequired,\n\n #[error(\"OAuth authorization failed: {0}\")]\n AuthorizationFailed(String),\n\n #[error(\"OAuth token exchange failed: {0}\")]\n TokenExchangeFailed(String),\n\n #[error(\"OAuth token refresh failed: {0}\")]\n TokenRefreshFailed(String),\n\n #[error(\"HTTP error: {0}\")]\n HttpError(#[from] reqwest::Error),\n\n #[error(\"OAuth error: {0}\")]\n 
OAuthError(String),\n\n #[error(\"Metadata error: {0}\")]\n MetadataError(String),\n\n #[error(\"URL parse error: {0}\")]\n UrlError(#[from] url::ParseError),\n\n #[error(\"No authorization support detected\")]\n NoAuthorizationSupport,\n\n #[error(\"Internal error: {0}\")]\n InternalError(String),\n\n #[error(\"Invalid token type: {0}\")]\n InvalidTokenType(String),\n\n #[error(\"Token expired\")]\n TokenExpired,\n\n #[error(\"Invalid scope: {0}\")]\n InvalidScope(String),\n\n #[error(\"Registration failed: {0}\")]\n RegistrationFailed(String),\n}\n\n/// oauth2 metadata\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct AuthorizationMetadata {\n pub authorization_endpoint: String,\n pub token_endpoint: String,\n pub registration_endpoint: String,\n pub issuer: Option,\n pub jwks_uri: Option,\n pub scopes_supported: Option>,\n // allow additional fields\n #[serde(flatten)]\n pub additional_fields: HashMap,\n}\n\n/// oauth2 client config\n#[derive(Debug, Clone)]\npub struct OAuthClientConfig {\n pub client_id: String,\n pub client_secret: Option,\n pub scopes: Vec,\n pub redirect_uri: String,\n}\n\n// add type aliases for oauth2 types\ntype OAuthErrorResponse = oauth2::StandardErrorResponse;\ntype OAuthTokenResponse = StandardTokenResponse;\ntype OAuthTokenIntrospection =\n oauth2::StandardTokenIntrospectionResponse;\ntype OAuthRevocableToken = oauth2::StandardRevocableToken;\ntype OAuthRevocationError = oauth2::StandardErrorResponse;\ntype OAuthClient = oauth2::Client<\n OAuthErrorResponse,\n OAuthTokenResponse,\n OAuthTokenIntrospection,\n OAuthRevocableToken,\n OAuthRevocationError,\n oauth2::EndpointSet,\n oauth2::EndpointNotSet,\n oauth2::EndpointNotSet,\n oauth2::EndpointNotSet,\n oauth2::EndpointSet,\n>;\ntype Credentials = (String, Option);\n\n/// oauth2 auth manager\npub struct AuthorizationManager {\n http_client: HttpClient,\n metadata: Option,\n oauth_client: Option,\n credentials: RwLock>,\n pkce_verifier: RwLock>,\n expires_at: RwLock>,\n 
base_url: Url,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ClientRegistrationRequest {\n pub client_name: String,\n pub redirect_uris: Vec,\n pub grant_types: Vec,\n pub token_endpoint_auth_method: String,\n pub response_types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ClientRegistrationResponse {\n pub client_id: String,\n pub client_secret: Option,\n pub client_name: String,\n pub redirect_uris: Vec,\n // allow additional fields\n #[serde(flatten)]\n pub additional_fields: HashMap,\n}\n\nimpl AuthorizationManager {\n /// create new auth manager with base url\n pub async fn new(base_url: U) -> Result {\n let base_url = base_url.into_url()?;\n let http_client = HttpClient::builder()\n .timeout(Duration::from_secs(30))\n .build()\n .map_err(|e| AuthError::InternalError(e.to_string()))?;\n\n let manager = Self {\n http_client,\n metadata: None,\n oauth_client: None,\n credentials: RwLock::new(None),\n pkce_verifier: RwLock::new(None),\n expires_at: RwLock::new(None),\n base_url,\n };\n\n Ok(manager)\n }\n\n pub fn with_client(&mut self, http_client: HttpClient) -> Result<(), AuthError> {\n self.http_client = http_client;\n Ok(())\n }\n\n /// discover oauth2 metadata\n pub async fn discover_metadata(&self) -> Result {\n // according to the specification, the metadata should be located at \"/.well-known/oauth-authorization-server\"\n let mut discovery_url = self.base_url.clone();\n discovery_url.set_path(\"/.well-known/oauth-authorization-server\");\n debug!(\"discovery url: {:?}\", discovery_url);\n let response = self\n .http_client\n .get(discovery_url)\n .header(\"MCP-Protocol-Version\", \"2024-11-05\")\n .send()\n .await?;\n\n if response.status() == StatusCode::OK {\n let metadata = response\n .json::()\n .await\n .map_err(|e| {\n AuthError::MetadataError(format!(\"Failed to parse metadata: {}\", e))\n })?;\n debug!(\"metadata: {:?}\", metadata);\n Ok(metadata)\n } else {\n // fallback to default endpoints\n let 
mut auth_base = self.base_url.clone();\n // discard the path part, only keep scheme, host, port\n auth_base.set_path(\"\");\n\n Ok(AuthorizationMetadata {\n authorization_endpoint: format!(\"{}/authorize\", auth_base),\n token_endpoint: format!(\"{}/token\", auth_base),\n registration_endpoint: format!(\"{}/register\", auth_base),\n issuer: None,\n jwks_uri: None,\n scopes_supported: None,\n additional_fields: HashMap::new(),\n })\n }\n }\n\n /// get client id and credentials\n pub async fn get_credentials(&self) -> Result {\n let credentials = self.credentials.read().await;\n let client_id = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?\n .client_id();\n Ok((client_id.to_string(), credentials.clone()))\n }\n\n /// configure oauth2 client with client credentials\n pub fn configure_client(&mut self, config: OAuthClientConfig) -> Result<(), AuthError> {\n if self.metadata.is_none() {\n return Err(AuthError::NoAuthorizationSupport);\n }\n\n let metadata = self.metadata.as_ref().unwrap();\n\n let auth_url = AuthUrl::new(metadata.authorization_endpoint.clone())\n .map_err(|e| AuthError::OAuthError(format!(\"Invalid authorization URL: {}\", e)))?;\n\n let token_url = TokenUrl::new(metadata.token_endpoint.clone())\n .map_err(|e| AuthError::OAuthError(format!(\"Invalid token URL: {}\", e)))?;\n\n // debug!(\"token url: {:?}\", token_url);\n let client_id = ClientId::new(config.client_id);\n let redirect_url = RedirectUrl::new(config.redirect_uri.clone())\n .map_err(|e| AuthError::OAuthError(format!(\"Invalid re URL: {}\", e)))?;\n\n debug!(\"client_id: {:?}\", client_id);\n let mut client_builder = BasicClient::new(client_id.clone())\n .set_auth_uri(auth_url)\n .set_token_uri(token_url)\n .set_redirect_uri(redirect_url);\n\n if let Some(secret) = config.client_secret {\n client_builder = client_builder.set_client_secret(ClientSecret::new(secret));\n }\n\n self.oauth_client = Some(client_builder);\n 
Ok(())\n }\n\n /// dynamic register oauth2 client\n pub async fn register_client(\n &mut self,\n name: &str,\n redirect_uri: &str,\n ) -> Result {\n if self.metadata.is_none() {\n error!(\"No authorization support detected\");\n return Err(AuthError::NoAuthorizationSupport);\n }\n\n let metadata = self.metadata.as_ref().unwrap();\n let registration_url = metadata.registration_endpoint.clone();\n\n debug!(\"registration url: {:?}\", registration_url);\n // prepare registration request\n let registration_request = ClientRegistrationRequest {\n client_name: name.to_string(),\n redirect_uris: vec![redirect_uri.to_string()],\n grant_types: vec![\n \"authorization_code\".to_string(),\n \"refresh_token\".to_string(),\n ],\n token_endpoint_auth_method: \"none\".to_string(), // public client\n response_types: vec![\"code\".to_string()],\n };\n\n debug!(\"registration request: {:?}\", registration_request);\n\n let response = match self\n .http_client\n .post(registration_url)\n .json(®istration_request)\n .send()\n .await\n {\n Ok(response) => response,\n Err(e) => {\n error!(\"Registration request failed: {}\", e);\n return Err(AuthError::RegistrationFailed(format!(\n \"HTTP request error: {}\",\n e\n )));\n }\n };\n\n if !response.status().is_success() {\n let status = response.status();\n let error_text = match response.text().await {\n Ok(text) => text,\n Err(_) => \"cannot get error details\".to_string(),\n };\n\n error!(\"Registration failed: HTTP {} - {}\", status, error_text);\n return Err(AuthError::RegistrationFailed(format!(\n \"HTTP {}: {}\",\n status, error_text\n )));\n }\n\n debug!(\"registration response: {:?}\", response);\n let reg_response = match response.json::().await {\n Ok(response) => response,\n Err(e) => {\n error!(\"Failed to parse registration response: {}\", e);\n return Err(AuthError::RegistrationFailed(format!(\n \"analyze response error: {}\",\n e\n )));\n }\n };\n\n let config = OAuthClientConfig {\n client_id: reg_response.client_id,\n 
client_secret: reg_response.client_secret,\n redirect_uri: redirect_uri.to_string(),\n scopes: vec![],\n };\n\n self.configure_client(config.clone())?;\n Ok(config)\n }\n\n /// use provided client id to configure oauth2 client instead of dynamic registration\n /// this is useful when you have a stored client id from previous registration\n pub fn configure_client_id(&mut self, client_id: &str) -> Result<(), AuthError> {\n let config = OAuthClientConfig {\n client_id: client_id.to_string(),\n client_secret: None,\n scopes: vec![],\n redirect_uri: self.base_url.to_string(),\n };\n self.configure_client(config)\n }\n\n /// generate authorization url\n pub async fn get_authorization_url(&self, scopes: &[&str]) -> Result {\n let oauth_client = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?;\n\n // generate pkce challenge\n let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();\n\n // build authorization request\n let mut auth_request = oauth_client\n .authorize_url(CsrfToken::new_random)\n .set_pkce_challenge(pkce_challenge);\n\n // add request scopes\n for scope in scopes {\n auth_request = auth_request.add_scope(Scope::new(scope.to_string()));\n }\n\n let (auth_url, _csrf_token) = auth_request.url();\n\n // store pkce verifier for later use\n *self.pkce_verifier.write().await = Some(pkce_verifier);\n debug!(\"set pkce verifier: {:?}\", self.pkce_verifier.read().await);\n\n Ok(auth_url.to_string())\n }\n\n /// exchange authorization code for access token\n pub async fn exchange_code_for_token(\n &self,\n code: &str,\n ) -> Result, AuthError> {\n debug!(\"start exchange code for token: {:?}\", code);\n let oauth_client = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?;\n\n let pkce_verifier = self\n .pkce_verifier\n .write()\n .await\n .take()\n .ok_or_else(|| AuthError::InternalError(\"PKCE verifier 
not found\".to_string()))?;\n\n let http_client = reqwest::ClientBuilder::new()\n .redirect(reqwest::redirect::Policy::none())\n .build()\n .map_err(|e| AuthError::InternalError(e.to_string()))?;\n debug!(\"client_id: {:?}\", oauth_client.client_id());\n\n // exchange token\n let token_result = oauth_client\n .exchange_code(AuthorizationCode::new(code.to_string()))\n .set_pkce_verifier(pkce_verifier)\n .request_async(&http_client)\n .await\n .map_err(|e| AuthError::TokenExchangeFailed(e.to_string()))?;\n\n // get expires_in from token response\n let expires_in = token_result.expires_in();\n if let Some(expires_in) = expires_in {\n let expires_at = Instant::now() + expires_in;\n *self.expires_at.write().await = Some(expires_at);\n }\n debug!(\"exchange token result: {:?}\", token_result);\n // store credentials\n *self.credentials.write().await = Some(token_result.clone());\n\n Ok(token_result)\n }\n\n /// get access token, if expired, refresh it automatically\n pub async fn get_access_token(&self) -> Result {\n let credentials = self.credentials.read().await;\n\n if let Some(creds) = credentials.as_ref() {\n // check if the token is expire\n if let Some(expires_at) = *self.expires_at.read().await {\n if expires_at < Instant::now() {\n // token expired, try to refresh , release the lock\n drop(credentials);\n let new_creds = self.refresh_token().await?;\n return Ok(new_creds.access_token().secret().to_string());\n }\n }\n\n Ok(creds.access_token().secret().to_string())\n } else {\n Err(AuthError::AuthorizationRequired)\n }\n }\n\n /// refresh access token\n pub async fn refresh_token(\n &self,\n ) -> Result, AuthError> {\n let oauth_client = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?;\n\n let current_credentials = self\n .credentials\n .read()\n .await\n .clone()\n .ok_or_else(|| AuthError::AuthorizationRequired)?;\n\n let refresh_token = current_credentials.refresh_token().ok_or_else(|| 
{\n AuthError::TokenRefreshFailed(\"No refresh token available\".to_string())\n })?;\n debug!(\"refresh token: {:?}\", refresh_token);\n // refresh token\n let token_result = oauth_client\n .exchange_refresh_token(&RefreshToken::new(refresh_token.secret().to_string()))\n .request_async(&self.http_client)\n .await\n .map_err(|e| AuthError::TokenRefreshFailed(e.to_string()))?;\n\n // store new credentials\n *self.credentials.write().await = Some(token_result.clone());\n\n // get expires_in from token response\n let expires_in = token_result.expires_in();\n if let Some(expires_in) = expires_in {\n let expires_at = Instant::now() + expires_in;\n *self.expires_at.write().await = Some(expires_at);\n }\n Ok(token_result)\n }\n\n /// prepare request, add authorization header\n pub async fn prepare_request(\n &self,\n request: reqwest::RequestBuilder,\n ) -> Result {\n let token = self.get_access_token().await?;\n Ok(request.header(AUTHORIZATION, format!(\"Bearer {}\", token)))\n }\n\n /// handle response, check if need to re-authorize\n pub async fn handle_response(\n &self,\n response: reqwest::Response,\n ) -> Result {\n if response.status() == StatusCode::UNAUTHORIZED {\n // 401 Unauthorized, need to re-authorize\n Err(AuthError::AuthorizationRequired)\n } else {\n Ok(response)\n }\n }\n}\n\n/// oauth2 authorization session, for guiding user to complete the authorization process\npub struct AuthorizationSession {\n pub auth_manager: AuthorizationManager,\n pub auth_url: String,\n pub redirect_uri: String,\n}\n\nimpl AuthorizationSession {\n /// create new authorization session\n pub async fn new(\n mut auth_manager: AuthorizationManager,\n scopes: &[&str],\n redirect_uri: &str,\n ) -> Result {\n // set redirect uri\n let config = OAuthClientConfig {\n client_id: \"mcp-client\".to_string(), // temporary id, will be updated by dynamic registration\n client_secret: None,\n scopes: scopes.iter().map(|s| s.to_string()).collect(),\n redirect_uri: redirect_uri.to_string(),\n 
};\n\n // try to dynamic register client\n let config = match auth_manager\n .register_client(\"MCP Client\", redirect_uri)\n .await\n {\n Ok(config) => config,\n Err(e) => {\n eprintln!(\"Dynamic registration failed: {}\", e);\n // fallback to default config\n config\n }\n };\n // reset client config\n auth_manager.configure_client(config)?;\n let auth_url = auth_manager.get_authorization_url(scopes).await?;\n\n Ok(Self {\n auth_manager,\n auth_url,\n redirect_uri: redirect_uri.to_string(),\n })\n }\n\n /// get client_id and credentials\n pub async fn get_credentials(&self) -> Result {\n self.auth_manager.get_credentials().await\n }\n\n /// get authorization url\n pub fn get_authorization_url(&self) -> &str {\n &self.auth_url\n }\n\n /// handle authorization code callback\n pub async fn handle_callback(\n &self,\n code: &str,\n ) -> Result, AuthError> {\n self.auth_manager.exchange_code_for_token(code).await\n }\n}\n\n/// http client extension, automatically add authorization header\npub struct AuthorizedHttpClient {\n auth_manager: Arc,\n inner_client: HttpClient,\n}\n\nimpl AuthorizedHttpClient {\n /// create new authorized http client\n pub fn new(auth_manager: Arc, client: Option) -> Self {\n let inner_client = client.unwrap_or_default();\n Self {\n auth_manager,\n inner_client,\n }\n }\n\n /// send authorized request\n pub async fn request(\n &self,\n method: reqwest::Method,\n url: U,\n ) -> Result {\n let request = self.inner_client.request(method, url);\n self.auth_manager.prepare_request(request).await\n }\n\n /// send get request\n pub async fn get(&self, url: U) -> Result {\n let request = self.request(reqwest::Method::GET, url).await?;\n let response = request.send().await?;\n self.auth_manager.handle_response(response).await\n }\n\n /// send post request\n pub async fn post(&self, url: U) -> Result {\n self.request(reqwest::Method::POST, url).await\n }\n}\n\n/// OAuth state machine\n/// Use the OAuthState to manage the OAuth client is more 
recommend\n/// But also you can use the AuthorizationManager,AuthorizationSession,AuthorizedHttpClient directly\npub enum OAuthState {\n /// the AuthorizationManager\n Unauthorized(AuthorizationManager),\n /// the AuthorizationSession\n Session(AuthorizationSession),\n /// the authd AuthorizationManager\n Authorized(AuthorizationManager),\n /// the authd http client\n AuthorizedHttpClient(AuthorizedHttpClient),\n}\n\nimpl OAuthState {\n /// Create new OAuth state machine\n pub async fn new(\n base_url: U,\n client: Option,\n ) -> Result {\n let mut manager = AuthorizationManager::new(base_url).await?;\n if let Some(client) = client {\n manager.with_client(client)?;\n }\n\n Ok(OAuthState::Unauthorized(manager))\n }\n\n /// Get client_id and OAuth credentials\n pub async fn get_credentials(&self) -> Result {\n // return client_id and credentials\n match self {\n OAuthState::Unauthorized(manager) | OAuthState::Authorized(manager) => {\n manager.get_credentials().await\n }\n OAuthState::Session(session) => session.get_credentials().await,\n OAuthState::AuthorizedHttpClient(client) => client.auth_manager.get_credentials().await,\n }\n }\n\n /// Manually set credentials and move into authorized state\n /// Useful if you're caching credentials externally and wish to reuse them\n pub async fn set_credentials(\n &mut self,\n client_id: &str,\n credentials: OAuthTokenResponse,\n ) -> Result<(), AuthError> {\n if let OAuthState::Unauthorized(manager) = self {\n let mut manager = std::mem::replace(\n manager,\n AuthorizationManager::new(\"http://localhost\").await?,\n );\n\n // write credentials\n *manager.credentials.write().await = Some(credentials);\n\n // discover metadata\n let metadata = manager.discover_metadata().await?;\n manager.metadata = Some(metadata);\n\n // set client id and secret\n manager.configure_client_id(client_id)?;\n\n *self = OAuthState::Authorized(manager);\n Ok(())\n } else {\n Err(AuthError::InternalError(\n \"Cannot set credentials in this 
state\".to_string(),\n ))\n }\n }\n\n /// start authorization\n pub async fn start_authorization(\n &mut self,\n scopes: &[&str],\n redirect_uri: &str,\n ) -> Result<(), AuthError> {\n if let OAuthState::Unauthorized(mut manager) = std::mem::replace(\n self,\n OAuthState::Unauthorized(AuthorizationManager::new(\"http://localhost\").await?),\n ) {\n debug!(\"start discovery\");\n let metadata = manager.discover_metadata().await?;\n manager.metadata = Some(metadata);\n debug!(\"start session\");\n let session = AuthorizationSession::new(manager, scopes, redirect_uri).await?;\n *self = OAuthState::Session(session);\n Ok(())\n } else {\n Err(AuthError::InternalError(\n \"Already in session state\".to_string(),\n ))\n }\n }\n\n /// complete authorization\n pub async fn complete_authorization(&mut self) -> Result<(), AuthError> {\n if let OAuthState::Session(session) = std::mem::replace(\n self,\n OAuthState::Unauthorized(AuthorizationManager::new(\"http://localhost\").await?),\n ) {\n *self = OAuthState::Authorized(session.auth_manager);\n Ok(())\n } else {\n Err(AuthError::InternalError(\"Not in session state\".to_string()))\n }\n }\n /// covert to authorized http client\n pub async fn to_authorized_http_client(&mut self) -> Result<(), AuthError> {\n if let OAuthState::Authorized(manager) = std::mem::replace(\n self,\n OAuthState::Authorized(AuthorizationManager::new(\"http://localhost\").await?),\n ) {\n *self = OAuthState::AuthorizedHttpClient(AuthorizedHttpClient::new(\n Arc::new(manager),\n None,\n ));\n Ok(())\n } else {\n Err(AuthError::InternalError(\n \"Not in authorized state\".to_string(),\n ))\n }\n }\n /// get current authorization url\n pub async fn get_authorization_url(&self) -> Result {\n match self {\n OAuthState::Session(session) => Ok(session.get_authorization_url().to_string()),\n OAuthState::Unauthorized(_) => {\n Err(AuthError::InternalError(\"Not in session state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n 
Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n /// handle authorization callback\n pub async fn handle_callback(&mut self, code: &str) -> Result<(), AuthError> {\n match self {\n OAuthState::Session(session) => {\n session.handle_callback(code).await?;\n self.complete_authorization().await\n }\n OAuthState::Unauthorized(_) => {\n Err(AuthError::InternalError(\"Not in session state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n /// get access token\n ", "suffix_code": "\n\n /// refresh access token\n pub async fn refresh_token(&self) -> Result<(), AuthError> {\n match self {\n OAuthState::Unauthorized(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Session(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Authorized(manager) => {\n manager.refresh_token().await?;\n Ok(())\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n pub fn into_authorization_manager(self) -> Option {\n match self {\n OAuthState::Authorized(manager) => Some(manager),\n _ => None,\n }\n }\n}\n", "middle_code": "pub async fn get_access_token(&self) -> Result {\n match self {\n OAuthState::Unauthorized(manager) => manager.get_access_token().await,\n OAuthState::Session(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }", 
"code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/rust-sdk/crates/rmcp/src/transport/sse_client.rs", "//! reference: https://html.spec.whatwg.org/multipage/server-sent-events.html\nuse std::{pin::Pin, sync::Arc};\n\nuse futures::{StreamExt, future::BoxFuture};\nuse http::Uri;\nuse reqwest::header::HeaderValue;\nuse sse_stream::Error as SseError;\nuse thiserror::Error;\n\nuse super::{\n Transport,\n common::client_side_sse::{BoxedSseResponse, SseRetryPolicy, SseStreamReconnect},\n};\nuse crate::{\n RoleClient,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::common::client_side_sse::SseAutoReconnectStream,\n};\n\n#[derive(Error, Debug)]\npub enum SseTransportError {\n #[error(\"SSE error: {0}\")]\n Sse(#[from] SseError),\n #[error(\"IO error: {0}\")]\n Io(#[from] std::io::Error),\n #[error(\"Client error: {0}\")]\n Client(E),\n #[error(\"unexpected end of stream\")]\n UnexpectedEndOfStream,\n #[error(\"Unexpected content type: {0:?}\")]\n UnexpectedContentType(Option),\n #[cfg(feature = \"auth\")]\n #[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\n #[error(\"Auth error: {0}\")]\n Auth(#[from] crate::transport::auth::AuthError),\n #[error(\"Invalid uri: {0}\")]\n InvalidUri(#[from] http::uri::InvalidUri),\n #[error(\"Invalid uri parts: {0}\")]\n InvalidUriParts(#[from] http::uri::InvalidUriParts),\n}\n\nimpl From for SseTransportError {\n fn from(e: reqwest::Error) -> Self {\n SseTransportError::Client(e)\n }\n}\n\npub trait SseClient: Clone + Send + Sync + 'static {\n type Error: std::error::Error + Send + Sync + 'static;\n fn post_message(\n &self,\n uri: Uri,\n message: ClientJsonRpcMessage,\n auth_token: Option,\n ) -> impl Future>> + Send + '_;\n fn get_stream(\n &self,\n uri: Uri,\n last_event_id: Option,\n auth_token: Option,\n ) -> impl Future>> + Send + '_;\n}\n\nstruct SseClientReconnect {\n pub client: C,\n pub uri: Uri,\n}\n\nimpl SseStreamReconnect for 
SseClientReconnect {\n type Error = SseTransportError;\n type Future = BoxFuture<'static, Result>;\n fn retry_connection(&mut self, last_event_id: Option<&str>) -> Self::Future {\n let client = self.client.clone();\n let uri = self.uri.clone();\n let last_event_id = last_event_id.map(|s| s.to_owned());\n Box::pin(async move { client.get_stream(uri, last_event_id, None).await })\n }\n}\ntype ServerMessageStream = Pin>>>;\npub struct SseClientTransport {\n client: C,\n config: SseClientConfig,\n message_endpoint: Uri,\n stream: Option>,\n}\n\nimpl Transport for SseClientTransport {\n type Error = SseTransportError;\n async fn receive(&mut self) -> Option {\n self.stream.as_mut()?.next().await?.ok()\n }\n fn send(\n &mut self,\n item: crate::service::TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let client = self.client.clone();\n let uri = self.message_endpoint.clone();\n async move { client.post_message(uri, item, None).await }\n }\n async fn close(&mut self) -> Result<(), Self::Error> {\n self.stream.take();\n Ok(())\n }\n}\n\nimpl std::fmt::Debug for SseClientTransport {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"SseClientWorker\")\n .field(\"client\", &self.client)\n .field(\"config\", &self.config)\n .finish()\n }\n}\n\nimpl SseClientTransport {\n pub async fn start_with_client(\n client: C,\n config: SseClientConfig,\n ) -> Result> {\n let sse_endpoint = config.sse_endpoint.as_ref().parse::()?;\n\n let mut sse_stream = client.get_stream(sse_endpoint.clone(), None, None).await?;\n let message_endpoint = if let Some(endpoint) = config.use_message_endpoint.clone() {\n let ep = endpoint.parse::()?;\n let mut sse_endpoint_parts = sse_endpoint.clone().into_parts();\n sse_endpoint_parts.path_and_query = ep.into_parts().path_and_query;\n Uri::from_parts(sse_endpoint_parts)?\n } else {\n // wait the endpoint event\n loop {\n let sse = sse_stream\n .next()\n .await\n 
.ok_or(SseTransportError::UnexpectedEndOfStream)??;\n let Some(\"endpoint\") = sse.event.as_deref() else {\n continue;\n };\n let ep = sse.data.unwrap_or_default();\n\n break message_endpoint(sse_endpoint.clone(), ep)?;\n }\n };\n\n let stream = Box::pin(SseAutoReconnectStream::new(\n sse_stream,\n SseClientReconnect {\n client: client.clone(),\n uri: sse_endpoint.clone(),\n },\n config.retry_policy.clone(),\n ));\n Ok(Self {\n client,\n config,\n message_endpoint,\n stream: Some(stream),\n })\n }\n}\n\nfn message_endpoint(base: http::Uri, endpoint: String) -> Result {\n // If endpoint is a full URL, parse and return it directly\n if endpoint.starts_with(\"http://\") || endpoint.starts_with(\"https://\") {\n return endpoint.parse::();\n }\n\n let mut base_parts = base.into_parts();\n let endpoint_clone = endpoint.clone();\n\n if endpoint.starts_with(\"?\") {\n // Query only - keep base path and append query\n if let Some(base_path_and_query) = &base_parts.path_and_query {\n let base_path = base_path_and_query.path();\n base_parts.path_and_query = Some(format!(\"{}{}\", base_path, endpoint).parse()?);\n } else {\n base_parts.path_and_query = Some(format!(\"/{}\", endpoint).parse()?);\n }\n } else {\n // Path (with optional query) - replace entire path_and_query\n let path_to_use = if endpoint.starts_with(\"/\") {\n endpoint // Use absolute path as-is\n } else {\n format!(\"/{}\", endpoint) // Make relative path absolute\n };\n base_parts.path_and_query = Some(path_to_use.parse()?);\n }\n\n http::Uri::from_parts(base_parts).map_err(|_| endpoint_clone.parse::().unwrap_err())\n}\n\n#[derive(Debug, Clone)]\npub struct SseClientConfig {\n /// client sse endpoint\n ///\n /// # How this client resolve the message endpoint\n /// if sse_endpoint has this format: ``,\n /// then the message endpoint will be ``.\n ///\n /// For example, if you config the sse_endpoint as `http://example.com/some_path/sse`,\n /// and the server send the message endpoint event as 
`message?session_id=123`,\n /// then the message endpoint will be `http://example.com/message`.\n ///\n /// This follow the rules of JavaScript's [`new URL(url, base)`](https://developer.mozilla.org/zh-CN/docs/Web/API/URL/URL)\n pub sse_endpoint: Arc,\n pub retry_policy: Arc,\n /// if this is settled, the client will use this endpoint to send message and skip get the endpoint event\n pub use_message_endpoint: Option,\n}\n\nimpl Default for SseClientConfig {\n fn default() -> Self {\n Self {\n sse_endpoint: \"\".into(),\n retry_policy: Arc::new(super::common::client_side_sse::FixedInterval::default()),\n use_message_endpoint: None,\n }\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_message_endpoint() {\n let base_url = \"https://localhost/sse\".parse::().unwrap();\n\n // Query only\n let result = message_endpoint(base_url.clone(), \"?sessionId=x\".to_string()).unwrap();\n assert_eq!(result.to_string(), \"https://localhost/sse?sessionId=x\");\n\n // Relative path with query\n let result = message_endpoint(base_url.clone(), \"mypath?sessionId=x\".to_string()).unwrap();\n assert_eq!(result.to_string(), \"https://localhost/mypath?sessionId=x\");\n\n // Absolute path with query\n let result = message_endpoint(base_url.clone(), \"/xxx?sessionId=x\".to_string()).unwrap();\n assert_eq!(result.to_string(), \"https://localhost/xxx?sessionId=x\");\n\n // Full URL\n let result = message_endpoint(\n base_url.clone(),\n \"http://example.com/xxx?sessionId=x\".to_string(),\n )\n .unwrap();\n assert_eq!(result.to_string(), \"http://example.com/xxx?sessionId=x\");\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_client.rs", "use std::{borrow::Cow, sync::Arc, time::Duration};\n\nuse futures::{Stream, StreamExt, future::BoxFuture, stream::BoxStream};\npub use sse_stream::Error as SseError;\nuse sse_stream::Sse;\nuse thiserror::Error;\nuse tokio_util::sync::CancellationToken;\n\nuse super::common::client_side_sse::{ExponentialBackoff, 
SseRetryPolicy, SseStreamReconnect};\nuse crate::{\n RoleClient,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::{\n common::client_side_sse::SseAutoReconnectStream,\n worker::{Worker, WorkerQuitReason, WorkerSendRequest, WorkerTransport},\n },\n};\n\ntype BoxedSseStream = BoxStream<'static, Result>;\n\n#[derive(Error, Debug)]\npub enum StreamableHttpError {\n #[error(\"SSE error: {0}\")]\n Sse(#[from] SseError),\n #[error(\"Io error: {0}\")]\n Io(#[from] std::io::Error),\n #[error(\"Client error: {0}\")]\n Client(E),\n #[error(\"unexpected end of stream\")]\n UnexpectedEndOfStream,\n #[error(\"unexpected server response: {0}\")]\n UnexpectedServerResponse(Cow<'static, str>),\n #[error(\"Unexpected content type: {0:?}\")]\n UnexpectedContentType(Option),\n #[error(\"Server does not support SSE\")]\n SeverDoesNotSupportSse,\n #[error(\"Server does not support delete session\")]\n SeverDoesNotSupportDeleteSession,\n #[error(\"Tokio join error: {0}\")]\n TokioJoinError(#[from] tokio::task::JoinError),\n #[error(\"Deserialize error: {0}\")]\n Deserialize(#[from] serde_json::Error),\n #[error(\"Transport channel closed\")]\n TransportChannelClosed,\n #[cfg(feature = \"auth\")]\n #[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\n #[error(\"Auth error: {0}\")]\n Auth(#[from] crate::transport::auth::AuthError),\n}\n\nimpl From for StreamableHttpError {\n fn from(e: reqwest::Error) -> Self {\n StreamableHttpError::Client(e)\n }\n}\n\npub enum StreamableHttpPostResponse {\n Accepted,\n Json(ServerJsonRpcMessage, Option),\n Sse(BoxedSseStream, Option),\n}\n\nimpl std::fmt::Debug for StreamableHttpPostResponse {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n Self::Accepted => write!(f, \"Accepted\"),\n Self::Json(arg0, arg1) => f.debug_tuple(\"Json\").field(arg0).field(arg1).finish(),\n Self::Sse(_, arg1) => f.debug_tuple(\"Sse\").field(arg1).finish(),\n }\n }\n}\n\nimpl StreamableHttpPostResponse {\n pub async fn 
expect_initialized(\n self,\n ) -> Result<(ServerJsonRpcMessage, Option), StreamableHttpError>\n where\n E: std::error::Error + Send + Sync + 'static,\n {\n match self {\n Self::Json(message, session_id) => Ok((message, session_id)),\n Self::Sse(mut stream, session_id) => {\n let event =\n stream\n .next()\n .await\n .ok_or(StreamableHttpError::UnexpectedServerResponse(\n \"empty sse stream\".into(),\n ))??;\n let message: ServerJsonRpcMessage =\n serde_json::from_str(&event.data.unwrap_or_default())?;\n Ok((message, session_id))\n }\n _ => Err(StreamableHttpError::UnexpectedServerResponse(\n \"expect initialized, accepted\".into(),\n )),\n }\n }\n\n pub fn expect_json(self) -> Result>\n where\n E: std::error::Error + Send + Sync + 'static,\n {\n match self {\n Self::Json(message, ..) => Ok(message),\n got => Err(StreamableHttpError::UnexpectedServerResponse(\n format!(\"expect json, got {got:?}\").into(),\n )),\n }\n }\n\n pub fn expect_accepted(self) -> Result<(), StreamableHttpError>\n where\n E: std::error::Error + Send + Sync + 'static,\n {\n match self {\n Self::Accepted => Ok(()),\n got => Err(StreamableHttpError::UnexpectedServerResponse(\n format!(\"expect accepted, got {got:?}\").into(),\n )),\n }\n }\n}\n\npub trait StreamableHttpClient: Clone + Send + 'static {\n type Error: std::error::Error + Send + Sync + 'static;\n fn post_message(\n &self,\n uri: Arc,\n message: ClientJsonRpcMessage,\n session_id: Option>,\n auth_header: Option,\n ) -> impl Future>>\n + Send\n + '_;\n fn delete_session(\n &self,\n uri: Arc,\n session_id: Arc,\n auth_header: Option,\n ) -> impl Future>> + Send + '_;\n fn get_stream(\n &self,\n uri: Arc,\n session_id: Arc,\n last_event_id: Option,\n auth_header: Option,\n ) -> impl Future<\n Output = Result<\n BoxStream<'static, Result>,\n StreamableHttpError,\n >,\n > + Send\n + '_;\n}\n\npub struct RetryConfig {\n pub max_times: Option,\n pub min_duration: Duration,\n}\n\nstruct StreamableHttpClientReconnect {\n pub client: C,\n 
pub session_id: Arc,\n pub uri: Arc,\n}\n\nimpl SseStreamReconnect for StreamableHttpClientReconnect {\n type Error = StreamableHttpError;\n type Future = BoxFuture<'static, Result>;\n fn retry_connection(&mut self, last_event_id: Option<&str>) -> Self::Future {\n let client = self.client.clone();\n let uri = self.uri.clone();\n let session_id = self.session_id.clone();\n let last_event_id = last_event_id.map(|s| s.to_owned());\n Box::pin(async move {\n client\n .get_stream(uri, session_id, last_event_id, None)\n .await\n })\n }\n}\n\n#[derive(Debug, Clone, Default)]\npub struct StreamableHttpClientWorker {\n pub client: C,\n pub config: StreamableHttpClientTransportConfig,\n}\n\nimpl StreamableHttpClientWorker {\n pub fn new_simple(url: impl Into>) -> Self {\n Self {\n client: C::default(),\n config: StreamableHttpClientTransportConfig {\n uri: url.into(),\n ..Default::default()\n },\n }\n }\n}\n\nimpl StreamableHttpClientWorker {\n pub fn new(client: C, config: StreamableHttpClientTransportConfig) -> Self {\n Self { client, config }\n }\n}\n\nimpl StreamableHttpClientWorker {\n async fn execute_sse_stream(\n sse_stream: impl Stream>>\n + Send\n + 'static,\n sse_worker_tx: tokio::sync::mpsc::Sender,\n close_on_response: bool,\n ct: CancellationToken,\n ) -> Result<(), StreamableHttpError> {\n let mut sse_stream = std::pin::pin!(sse_stream);\n loop {\n let message = tokio::select! {\n event = sse_stream.next() => {\n event\n }\n _ = ct.cancelled() => {\n tracing::debug!(\"cancelled\");\n break;\n }\n };\n let Some(message) = message.transpose()? 
else {\n break;\n };\n let is_response = matches!(message, ServerJsonRpcMessage::Response(_));\n let yield_result = sse_worker_tx.send(message).await;\n if yield_result.is_err() {\n tracing::trace!(\"streamable http transport worker dropped, exiting\");\n break;\n }\n if close_on_response && is_response {\n tracing::debug!(\"got response, closing sse stream\");\n break;\n }\n }\n Ok(())\n }\n}\n\nimpl Worker for StreamableHttpClientWorker {\n type Role = RoleClient;\n type Error = StreamableHttpError;\n fn err_closed() -> Self::Error {\n StreamableHttpError::TransportChannelClosed\n }\n fn err_join(e: tokio::task::JoinError) -> Self::Error {\n StreamableHttpError::TokioJoinError(e)\n }\n fn config(&self) -> super::worker::WorkerConfig {\n super::worker::WorkerConfig {\n name: Some(\"StreamableHttpClientWorker\".into()),\n channel_buffer_capacity: self.config.channel_buffer_capacity,\n }\n }\n async fn run(\n self,\n mut context: super::worker::WorkerContext,\n ) -> Result<(), WorkerQuitReason> {\n let channel_buffer_capacity = self.config.channel_buffer_capacity;\n let (sse_worker_tx, mut sse_worker_rx) =\n tokio::sync::mpsc::channel::(channel_buffer_capacity);\n let config = self.config.clone();\n let transport_task_ct = context.cancellation_token.clone();\n let _drop_guard = transport_task_ct.clone().drop_guard();\n let WorkerSendRequest {\n responder,\n message: initialize_request,\n } = context.recv_from_handler().await?;\n let _ = responder.send(Ok(()));\n let (message, session_id) = self\n .client\n .post_message(config.uri.clone(), initialize_request, None, None)\n .await\n .map_err(WorkerQuitReason::fatal_context(\"send initialize request\"))?\n .expect_initialized::()\n .await\n .map_err(WorkerQuitReason::fatal_context(\n \"process initialize response\",\n ))?;\n let session_id: Option> = if let Some(session_id) = session_id {\n Some(session_id.into())\n } else {\n if !self.config.allow_stateless {\n return Err(WorkerQuitReason::fatal(\n \"missing session 
id in initialize response\",\n \"process initialize response\",\n ));\n }\n None\n };\n // delete session when drop guard is dropped\n if let Some(session_id) = &session_id {\n let ct = transport_task_ct.clone();\n let client = self.client.clone();\n let session_id = session_id.clone();\n let url = config.uri.clone();\n tokio::spawn(async move {\n ct.cancelled().await;\n let delete_session_result =\n client.delete_session(url, session_id.clone(), None).await;\n match delete_session_result {\n Ok(_) => {\n tracing::info!(session_id = session_id.as_ref(), \"delete session success\")\n }\n Err(StreamableHttpError::SeverDoesNotSupportDeleteSession) => {\n tracing::info!(\n session_id = session_id.as_ref(),\n \"server doesn't support delete session\"\n )\n }\n Err(e) => {\n tracing::error!(\n session_id = session_id.as_ref(),\n \"fail to delete session: {e}\"\n );\n }\n };\n });\n }\n\n context.send_to_handler(message).await?;\n let initialized_notification = context.recv_from_handler().await?;\n // expect a initialized response\n self.client\n .post_message(\n config.uri.clone(),\n initialized_notification.message,\n session_id.clone(),\n None,\n )\n .await\n .map_err(WorkerQuitReason::fatal_context(\n \"send initialized notification\",\n ))?\n .expect_accepted::()\n .map_err(WorkerQuitReason::fatal_context(\n \"process initialized notification response\",\n ))?;\n let _ = initialized_notification.responder.send(Ok(()));\n enum Event {\n ClientMessage(WorkerSendRequest),\n ServerMessage(ServerJsonRpcMessage),\n StreamResult(Result<(), StreamableHttpError>),\n }\n let mut streams = tokio::task::JoinSet::new();\n if let Some(session_id) = &session_id {\n match self\n .client\n .get_stream(config.uri.clone(), session_id.clone(), None, None)\n .await\n {\n Ok(stream) => {\n let sse_stream = SseAutoReconnectStream::new(\n stream,\n StreamableHttpClientReconnect {\n client: self.client.clone(),\n session_id: session_id.clone(),\n uri: config.uri.clone(),\n },\n 
self.config.retry_config.clone(),\n );\n streams.spawn(Self::execute_sse_stream(\n sse_stream,\n sse_worker_tx.clone(),\n false,\n transport_task_ct.child_token(),\n ));\n tracing::debug!(\"got common stream\");\n }\n Err(StreamableHttpError::SeverDoesNotSupportSse) => {\n tracing::debug!(\"server doesn't support sse, skip common stream\");\n }\n Err(e) => {\n // fail to get common stream\n tracing::error!(\"fail to get common stream: {e}\");\n return Err(WorkerQuitReason::fatal(\n \"fail to get general purpose event stream\",\n \"get general purpose event stream\",\n ));\n }\n }\n }\n loop {\n let event = tokio::select! {\n _ = transport_task_ct.cancelled() => {\n tracing::debug!(\"cancelled\");\n return Err(WorkerQuitReason::Cancelled);\n }\n message = context.recv_from_handler() => {\n let message = message?;\n Event::ClientMessage(message)\n },\n message = sse_worker_rx.recv() => {\n let Some(message) = message else {\n tracing::trace!(\"transport dropped, exiting\");\n return Err(WorkerQuitReason::HandlerTerminated);\n };\n Event::ServerMessage(message)\n },\n terminated_stream = streams.join_next(), if !streams.is_empty() => {\n match terminated_stream {\n Some(result) => {\n Event::StreamResult(result.map_err(StreamableHttpError::TokioJoinError).and_then(std::convert::identity))\n }\n None => {\n continue\n }\n }\n }\n };\n match event {\n Event::ClientMessage(send_request) => {\n let WorkerSendRequest { message, responder } = send_request;\n let response = self\n .client\n .post_message(config.uri.clone(), message, session_id.clone(), None)\n .await;\n let send_result = match response {\n Err(e) => Err(e),\n Ok(StreamableHttpPostResponse::Accepted) => {\n tracing::trace!(\"client message accepted\");\n Ok(())\n }\n Ok(StreamableHttpPostResponse::Json(message, ..)) => {\n context.send_to_handler(message).await?;\n Ok(())\n }\n Ok(StreamableHttpPostResponse::Sse(stream, ..)) => {\n if let Some(session_id) = &session_id {\n let sse_stream = 
SseAutoReconnectStream::new(\n stream,\n StreamableHttpClientReconnect {\n client: self.client.clone(),\n session_id: session_id.clone(),\n uri: config.uri.clone(),\n },\n self.config.retry_config.clone(),\n );\n streams.spawn(Self::execute_sse_stream(\n sse_stream,\n sse_worker_tx.clone(),\n true,\n transport_task_ct.child_token(),\n ));\n } else {\n let sse_stream = SseAutoReconnectStream::never_reconnect(\n stream,\n StreamableHttpError::::UnexpectedEndOfStream,\n );\n streams.spawn(Self::execute_sse_stream(\n sse_stream,\n sse_worker_tx.clone(),\n true,\n transport_task_ct.child_token(),\n ));\n }\n tracing::trace!(\"got new sse stream\");\n Ok(())\n }\n };\n let _ = responder.send(send_result);\n }\n Event::ServerMessage(json_rpc_message) => {\n // send the message to the handler\n context.send_to_handler(json_rpc_message).await?;\n }\n Event::StreamResult(result) => {\n if result.is_err() {\n tracing::warn!(\n \"sse client event stream terminated with error: {:?}\",\n result\n );\n }\n }\n }\n }\n }\n}\n\npub type StreamableHttpClientTransport = WorkerTransport>;\n\nimpl StreamableHttpClientTransport {\n pub fn with_client(client: C, config: StreamableHttpClientTransportConfig) -> Self {\n let worker = StreamableHttpClientWorker::new(client, config);\n WorkerTransport::spawn(worker)\n }\n}\n#[derive(Debug, Clone)]\npub struct StreamableHttpClientTransportConfig {\n pub uri: Arc,\n pub retry_config: Arc,\n pub channel_buffer_capacity: usize,\n /// if true, the transport will not require a session to be established\n pub allow_stateless: bool,\n}\n\nimpl StreamableHttpClientTransportConfig {\n pub fn with_uri(uri: impl Into>) -> Self {\n Self {\n uri: uri.into(),\n ..Default::default()\n }\n }\n}\n\nimpl Default for StreamableHttpClientTransportConfig {\n fn default() -> Self {\n Self {\n uri: \"localhost\".into(),\n retry_config: Arc::new(ExponentialBackoff::default()),\n channel_buffer_capacity: 16,\n allow_stateless: true,\n }\n }\n}\n"], 
["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/session/local.rs", "use std::{\n collections::{HashMap, HashSet, VecDeque},\n num::ParseIntError,\n sync::Arc,\n time::Duration,\n};\n\nuse futures::Stream;\nuse thiserror::Error;\nuse tokio::sync::{\n mpsc::{Receiver, Sender},\n oneshot,\n};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tracing::instrument;\n\nuse crate::{\n RoleServer,\n model::{\n CancelledNotificationParam, ClientJsonRpcMessage, ClientNotification, ClientRequest,\n JsonRpcNotification, JsonRpcRequest, Notification, ProgressNotificationParam,\n ProgressToken, RequestId, ServerJsonRpcMessage, ServerNotification,\n },\n transport::{\n WorkerTransport,\n common::server_side_http::{SessionId, session_id},\n worker::{Worker, WorkerContext, WorkerQuitReason, WorkerSendRequest},\n },\n};\n\n#[derive(Debug, Default)]\npub struct LocalSessionManager {\n pub sessions: tokio::sync::RwLock>,\n pub session_config: SessionConfig,\n}\n\n#[derive(Debug, Error)]\npub enum LocalSessionManagerError {\n #[error(\"Session not found: {0}\")]\n SessionNotFound(SessionId),\n #[error(\"Session error: {0}\")]\n SessionError(#[from] SessionError),\n #[error(\"Invalid event id: {0}\")]\n InvalidEventId(#[from] EventIdParseError),\n}\nimpl SessionManager for LocalSessionManager {\n type Error = LocalSessionManagerError;\n type Transport = WorkerTransport;\n async fn create_session(&self) -> Result<(SessionId, Self::Transport), Self::Error> {\n let id = session_id();\n let (handle, worker) = create_local_session(id.clone(), self.session_config.clone());\n self.sessions.write().await.insert(id.clone(), handle);\n Ok((id, WorkerTransport::spawn(worker)))\n }\n async fn initialize_session(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> Result {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let response = handle.initialize(message).await?;\n 
Ok(response)\n }\n async fn close_session(&self, id: &SessionId) -> Result<(), Self::Error> {\n let mut sessions = self.sessions.write().await;\n if let Some(handle) = sessions.remove(id) {\n handle.close().await?;\n }\n Ok(())\n }\n async fn has_session(&self, id: &SessionId) -> Result {\n let sessions = self.sessions.read().await;\n Ok(sessions.contains_key(id))\n }\n async fn create_stream(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> Result + Send + 'static, Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let receiver = handle.establish_request_wise_channel().await?;\n handle\n .push_message(message, receiver.http_request_id)\n .await?;\n Ok(ReceiverStream::new(receiver.inner))\n }\n\n async fn create_standalone_stream(\n &self,\n id: &SessionId,\n ) -> Result + Send + 'static, Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let receiver = handle.establish_common_channel().await?;\n Ok(ReceiverStream::new(receiver.inner))\n }\n\n async fn resume(\n &self,\n id: &SessionId,\n last_event_id: String,\n ) -> Result + Send + 'static, Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let receiver = handle.resume(last_event_id.parse()?).await?;\n Ok(ReceiverStream::new(receiver.inner))\n }\n\n async fn accept_message(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> Result<(), Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n handle.push_message(message, None).await?;\n Ok(())\n }\n}\n\n/// `/request_id>`\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct EventId {\n 
http_request_id: Option,\n index: usize,\n}\n\nimpl std::fmt::Display for EventId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.index)?;\n match &self.http_request_id {\n Some(http_request_id) => write!(f, \"/{http_request_id}\"),\n None => write!(f, \"\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Error)]\npub enum EventIdParseError {\n #[error(\"Invalid index: {0}\")]\n InvalidIndex(ParseIntError),\n #[error(\"Invalid numeric request id: {0}\")]\n InvalidNumericRequestId(ParseIntError),\n #[error(\"Missing request id type\")]\n InvalidRequestIdType,\n #[error(\"Missing request id\")]\n MissingRequestId,\n}\n\nimpl std::str::FromStr for EventId {\n type Err = EventIdParseError;\n fn from_str(s: &str) -> Result {\n if let Some((index, request_id)) = s.split_once(\"/\") {\n let index = usize::from_str(index).map_err(EventIdParseError::InvalidIndex)?;\n let request_id = u64::from_str(request_id).map_err(EventIdParseError::InvalidIndex)?;\n Ok(EventId {\n http_request_id: Some(request_id),\n index,\n })\n } else {\n let index = usize::from_str(s).map_err(EventIdParseError::InvalidIndex)?;\n Ok(EventId {\n http_request_id: None,\n index,\n })\n }\n }\n}\n\nuse super::{ServerSseMessage, SessionManager};\n\nstruct CachedTx {\n tx: Sender,\n cache: VecDeque,\n http_request_id: Option,\n capacity: usize,\n}\n\nimpl CachedTx {\n fn new(tx: Sender, http_request_id: Option) -> Self {\n Self {\n cache: VecDeque::with_capacity(tx.capacity()),\n capacity: tx.capacity(),\n tx,\n http_request_id,\n }\n }\n fn new_common(tx: Sender) -> Self {\n Self::new(tx, None)\n }\n\n async fn send(&mut self, message: ServerJsonRpcMessage) {\n let index = self.cache.back().map_or(0, |m| {\n m.event_id\n .as_deref()\n .unwrap_or_default()\n .parse::()\n .expect(\"valid event id\")\n .index\n + 1\n });\n let event_id = EventId {\n http_request_id: self.http_request_id,\n index,\n };\n let message = ServerSseMessage {\n event_id: 
Some(event_id.to_string()),\n message: Arc::new(message),\n };\n if self.cache.len() >= self.capacity {\n self.cache.pop_front();\n self.cache.push_back(message.clone());\n } else {\n self.cache.push_back(message.clone());\n }\n let _ = self.tx.send(message).await.inspect_err(|e| {\n let event_id = &e.0.event_id;\n tracing::trace!(?event_id, \"trying to send message in a closed session\")\n });\n }\n\n async fn sync(&mut self, index: usize) -> Result<(), SessionError> {\n let Some(front) = self.cache.front() else {\n return Ok(());\n };\n let front_event_id = front\n .event_id\n .as_deref()\n .unwrap_or_default()\n .parse::()?;\n let sync_index = index.saturating_sub(front_event_id.index);\n if sync_index > self.cache.len() {\n // invalid index\n return Err(SessionError::InvalidEventId);\n }\n for message in self.cache.iter().skip(sync_index) {\n let send_result = self.tx.send(message.clone()).await;\n if send_result.is_err() {\n let event_id: EventId = message.event_id.as_deref().unwrap_or_default().parse()?;\n return Err(SessionError::ChannelClosed(Some(event_id.index as u64)));\n }\n }\n Ok(())\n }\n}\n\nstruct HttpRequestWise {\n resources: HashSet,\n tx: CachedTx,\n}\n\ntype HttpRequestId = u64;\n#[derive(Debug, Clone, Hash, PartialEq, Eq)]\nenum ResourceKey {\n McpRequestId(RequestId),\n ProgressToken(ProgressToken),\n}\n\npub struct LocalSessionWorker {\n id: SessionId,\n next_http_request_id: HttpRequestId,\n tx_router: HashMap,\n resource_router: HashMap,\n common: CachedTx,\n event_rx: Receiver,\n session_config: SessionConfig,\n}\n\nimpl LocalSessionWorker {\n pub fn id(&self) -> &SessionId {\n &self.id\n }\n}\n\n#[derive(Debug, Error)]\npub enum SessionError {\n #[error(\"Invalid request id: {0}\")]\n DuplicatedRequestId(HttpRequestId),\n #[error(\"Channel closed: {0:?}\")]\n ChannelClosed(Option),\n #[error(\"Cannot parse event id: {0}\")]\n EventIdParseError(#[from] EventIdParseError),\n #[error(\"Session service terminated\")]\n 
SessionServiceTerminated,\n #[error(\"Invalid event id\")]\n InvalidEventId,\n #[error(\"Transport closed\")]\n TransportClosed,\n #[error(\"IO error: {0}\")]\n Io(#[from] std::io::Error),\n #[error(\"Tokio join error {0}\")]\n TokioJoinError(#[from] tokio::task::JoinError),\n}\n\nimpl From for std::io::Error {\n fn from(value: SessionError) -> Self {\n match value {\n SessionError::Io(io) => io,\n _ => std::io::Error::new(std::io::ErrorKind::Other, format!(\"Session error: {value}\")),\n }\n }\n}\n\nenum OutboundChannel {\n RequestWise { id: HttpRequestId, close: bool },\n Common,\n}\n\npub struct StreamableHttpMessageReceiver {\n pub http_request_id: Option,\n pub inner: Receiver,\n}\n\nimpl LocalSessionWorker {\n fn unregister_resource(&mut self, resource: &ResourceKey) {\n if let Some(http_request_id) = self.resource_router.remove(resource) {\n tracing::trace!(?resource, http_request_id, \"unregister resource\");\n if let Some(channel) = self.tx_router.get_mut(&http_request_id) {\n // It's okey to do so, since we don't handle batch json rpc request anymore\n // and this can be refactored after the batch request is removed in the coming version.\n if channel.resources.is_empty() || matches!(resource, ResourceKey::McpRequestId(_))\n {\n tracing::debug!(http_request_id, \"close http request wise channel\");\n if let Some(channel) = self.tx_router.remove(&http_request_id) {\n for resource in channel.resources {\n self.resource_router.remove(&resource);\n }\n }\n }\n } else {\n tracing::warn!(http_request_id, \"http request wise channel not found\");\n }\n }\n }\n fn register_resource(&mut self, resource: ResourceKey, http_request_id: HttpRequestId) {\n tracing::trace!(?resource, http_request_id, \"register resource\");\n if let Some(channel) = self.tx_router.get_mut(&http_request_id) {\n channel.resources.insert(resource.clone());\n self.resource_router.insert(resource, http_request_id);\n }\n }\n fn register_request(\n &mut self,\n request: &JsonRpcRequest,\n 
http_request_id: HttpRequestId,\n ) {\n use crate::model::GetMeta;\n self.register_resource(\n ResourceKey::McpRequestId(request.id.clone()),\n http_request_id,\n );\n if let Some(progress_token) = request.request.get_meta().get_progress_token() {\n self.register_resource(\n ResourceKey::ProgressToken(progress_token.clone()),\n http_request_id,\n );\n }\n }\n fn catch_cancellation_notification(\n &mut self,\n notification: &JsonRpcNotification,\n ) {\n if let ClientNotification::CancelledNotification(n) = ¬ification.notification {\n let request_id = n.params.request_id.clone();\n let resource = ResourceKey::McpRequestId(request_id);\n self.unregister_resource(&resource);\n }\n }\n fn next_http_request_id(&mut self) -> HttpRequestId {\n let id = self.next_http_request_id;\n self.next_http_request_id = self.next_http_request_id.wrapping_add(1);\n id\n }\n async fn establish_request_wise_channel(\n &mut self,\n ) -> Result {\n let http_request_id = self.next_http_request_id();\n let (tx, rx) = tokio::sync::mpsc::channel(self.session_config.channel_capacity);\n self.tx_router.insert(\n http_request_id,\n HttpRequestWise {\n resources: Default::default(),\n tx: CachedTx::new(tx, Some(http_request_id)),\n },\n );\n tracing::debug!(http_request_id, \"establish new request wise channel\");\n Ok(StreamableHttpMessageReceiver {\n http_request_id: Some(http_request_id),\n inner: rx,\n })\n }\n fn resolve_outbound_channel(&self, message: &ServerJsonRpcMessage) -> OutboundChannel {\n match &message {\n ServerJsonRpcMessage::Request(_) => OutboundChannel::Common,\n ServerJsonRpcMessage::Notification(JsonRpcNotification {\n notification:\n ServerNotification::ProgressNotification(Notification {\n params: ProgressNotificationParam { progress_token, .. 
},\n ..\n }),\n ..\n }) => {\n let id = self\n .resource_router\n .get(&ResourceKey::ProgressToken(progress_token.clone()));\n\n if let Some(id) = id {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::Notification(JsonRpcNotification {\n notification:\n ServerNotification::CancelledNotification(Notification {\n params: CancelledNotificationParam { request_id, .. },\n ..\n }),\n ..\n }) => {\n if let Some(id) = self\n .resource_router\n .get(&ResourceKey::McpRequestId(request_id.clone()))\n {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::Notification(_) => OutboundChannel::Common,\n ServerJsonRpcMessage::Response(json_rpc_response) => {\n if let Some(id) = self\n .resource_router\n .get(&ResourceKey::McpRequestId(json_rpc_response.id.clone()))\n {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::Error(json_rpc_error) => {\n if let Some(id) = self\n .resource_router\n .get(&ResourceKey::McpRequestId(json_rpc_error.id.clone()))\n {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::BatchRequest(_) | ServerJsonRpcMessage::BatchResponse(_) => {\n // the server side should never yield a batch request or response now\n unreachable!(\"server side won't yield batch request or response\")\n }\n }\n }\n async fn handle_server_message(\n &mut self,\n message: ServerJsonRpcMessage,\n ) -> Result<(), SessionError> {\n let outbound_channel = self.resolve_outbound_channel(&message);\n match outbound_channel {\n OutboundChannel::RequestWise { id, close } => {\n if let Some(request_wise) = self.tx_router.get_mut(&id) {\n request_wise.tx.send(message).await;\n if close {\n self.tx_router.remove(&id);\n }\n } else {\n return 
Err(SessionError::ChannelClosed(Some(id)));\n }\n }\n OutboundChannel::Common => self.common.send(message).await,\n }\n Ok(())\n }\n async fn resume(\n &mut self,\n last_event_id: EventId,\n ) -> Result {\n match last_event_id.http_request_id {\n Some(http_request_id) => {\n let request_wise = self\n .tx_router\n .get_mut(&http_request_id)\n .ok_or(SessionError::ChannelClosed(Some(http_request_id)))?;\n let channel = tokio::sync::mpsc::channel(self.session_config.channel_capacity);\n let (tx, rx) = channel;\n request_wise.tx.tx = tx;\n let index = last_event_id.index;\n // sync messages after index\n request_wise.tx.sync(index).await?;\n Ok(StreamableHttpMessageReceiver {\n http_request_id: Some(http_request_id),\n inner: rx,\n })\n }\n None => {\n let channel = tokio::sync::mpsc::channel(self.session_config.channel_capacity);\n let (tx, rx) = channel;\n self.common.tx = tx;\n let index = last_event_id.index;\n // sync messages after index\n self.common.sync(index).await?;\n Ok(StreamableHttpMessageReceiver {\n http_request_id: None,\n inner: rx,\n })\n }\n }\n }\n}\n\nenum SessionEvent {\n ClientMessage {\n message: ClientJsonRpcMessage,\n http_request_id: Option,\n },\n EstablishRequestWiseChannel {\n responder: oneshot::Sender>,\n },\n CloseRequestWiseChannel {\n id: HttpRequestId,\n responder: oneshot::Sender>,\n },\n Resume {\n last_event_id: EventId,\n responder: oneshot::Sender>,\n },\n InitializeRequest {\n request: ClientJsonRpcMessage,\n responder: oneshot::Sender>,\n },\n Close,\n}\n\n#[derive(Debug, Clone)]\npub enum SessionQuitReason {\n ServiceTerminated,\n ClientTerminated,\n ExpectInitializeRequest,\n ExpectInitializeResponse,\n Cancelled,\n}\n\n#[derive(Debug, Clone)]\npub struct LocalSessionHandle {\n id: SessionId,\n // after all event_tx drop, inner task will be terminated\n event_tx: Sender,\n}\n\nimpl LocalSessionHandle {\n /// Get the session id\n pub fn id(&self) -> &SessionId {\n &self.id\n }\n\n /// Close the session\n pub async fn 
close(&self) -> Result<(), SessionError> {\n self.event_tx\n .send(SessionEvent::Close)\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n Ok(())\n }\n\n /// Send a message to the session\n pub async fn push_message(\n &self,\n message: ClientJsonRpcMessage,\n http_request_id: Option,\n ) -> Result<(), SessionError> {\n self.event_tx\n .send(SessionEvent::ClientMessage {\n message,\n http_request_id,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n Ok(())\n }\n\n /// establish a channel for a http-request, the corresponded message from server will be\n /// sent through this channel. The channel will be closed when the request is completed,\n /// or you can close it manually by calling [`LocalSessionHandle::close_request_wise_channel`].\n pub async fn establish_request_wise_channel(\n &self,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::EstablishRequestWiseChannel { responder: tx })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n\n /// close the http-request wise channel.\n pub async fn close_request_wise_channel(\n &self,\n request_id: HttpRequestId,\n ) -> Result<(), SessionError> {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::CloseRequestWiseChannel {\n id: request_id,\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n\n /// Establish a common channel for general purpose messages.\n pub async fn establish_common_channel(\n &self,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::Resume {\n last_event_id: EventId {\n http_request_id: None,\n index: 0,\n },\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| 
SessionError::SessionServiceTerminated)?\n }\n\n /// Resume streaming response by the last event id. This is suitable for both request wise and common channel.\n pub async fn resume(\n &self,\n last_event_id: EventId,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::Resume {\n last_event_id,\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n\n /// Send an initialize request to the session. And wait for the initialized response.\n ///\n /// This is used to establish a session with the server.\n pub async fn initialize(\n &self,\n request: ClientJsonRpcMessage,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::InitializeRequest {\n request,\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n}\n\npub type SessionTransport = WorkerTransport;\n\nimpl Worker for LocalSessionWorker {\n type Error = SessionError;\n type Role = RoleServer;\n fn err_closed() -> Self::Error {\n SessionError::TransportClosed\n }\n fn err_join(e: tokio::task::JoinError) -> Self::Error {\n SessionError::TokioJoinError(e)\n }\n fn config(&self) -> crate::transport::worker::WorkerConfig {\n crate::transport::worker::WorkerConfig {\n name: Some(format!(\"streamable-http-session-{}\", self.id)),\n channel_buffer_capacity: self.session_config.channel_capacity,\n }\n }\n #[instrument(name = \"streamable_http_session\", skip_all, fields(id = self.id.as_ref()))]\n async fn run(mut self, mut context: WorkerContext) -> Result<(), WorkerQuitReason> {\n enum InnerEvent {\n FromHttpService(SessionEvent),\n FromHandler(WorkerSendRequest),\n }\n // waiting for initialize request\n let evt = self.event_rx.recv().await.ok_or_else(|| {\n WorkerQuitReason::fatal(\"transport terminated\", 
\"get initialize request\")\n })?;\n let SessionEvent::InitializeRequest { request, responder } = evt else {\n return Err(WorkerQuitReason::fatal(\n \"unexpected message\",\n \"get initialize request\",\n ));\n };\n context.send_to_handler(request).await?;\n let send_initialize_response = context.recv_from_handler().await?;\n responder\n .send(Ok(send_initialize_response.message))\n .map_err(|_| {\n WorkerQuitReason::fatal(\n \"failed to send initialize response to http service\",\n \"send initialize response\",\n )\n })?;\n send_initialize_response\n .responder\n .send(Ok(()))\n .map_err(|_| WorkerQuitReason::HandlerTerminated)?;\n let ct = context.cancellation_token.clone();\n let keep_alive = self.session_config.keep_alive.unwrap_or(Duration::MAX);\n loop {\n let keep_alive_timeout = tokio::time::sleep(keep_alive);\n let event = tokio::select! {\n event = self.event_rx.recv() => {\n if let Some(event) = event {\n InnerEvent::FromHttpService(event)\n } else {\n return Err(WorkerQuitReason::fatal(\"session dropped\", \"waiting next session event\"))\n }\n },\n from_handler = context.recv_from_handler() => {\n InnerEvent::FromHandler(from_handler?)\n }\n _ = ct.cancelled() => {\n return Err(WorkerQuitReason::Cancelled)\n }\n _ = keep_alive_timeout => {\n return Err(WorkerQuitReason::fatal(\"keep live timeout\", \"poll next session event\"))\n }\n };\n match event {\n InnerEvent::FromHandler(WorkerSendRequest { message, responder }) => {\n // catch response\n let to_unregister = match &message {\n crate::model::JsonRpcMessage::Response(json_rpc_response) => {\n let request_id = json_rpc_response.id.clone();\n Some(ResourceKey::McpRequestId(request_id))\n }\n crate::model::JsonRpcMessage::Error(json_rpc_error) => {\n let request_id = json_rpc_error.id.clone();\n Some(ResourceKey::McpRequestId(request_id))\n }\n _ => {\n None\n // no need to unregister resource\n }\n };\n let handle_result = self.handle_server_message(message).await;\n let _ = 
responder.send(handle_result).inspect_err(|error| {\n tracing::warn!(?error, \"failed to send message to http service handler\");\n });\n if let Some(to_unregister) = to_unregister {\n self.unregister_resource(&to_unregister);\n }\n }\n InnerEvent::FromHttpService(SessionEvent::ClientMessage {\n message: json_rpc_message,\n http_request_id,\n }) => {\n match &json_rpc_message {\n crate::model::JsonRpcMessage::Request(request) => {\n if let Some(http_request_id) = http_request_id {\n self.register_request(request, http_request_id)\n }\n }\n crate::model::JsonRpcMessage::Notification(notification) => {\n self.catch_cancellation_notification(notification)\n }\n crate::model::JsonRpcMessage::BatchRequest(items) => {\n for r in items {\n match r {\n crate::model::JsonRpcBatchRequestItem::Request(request) => {\n if let Some(http_request_id) = http_request_id {\n self.register_request(request, http_request_id)\n }\n }\n crate::model::JsonRpcBatchRequestItem::Notification(\n notification,\n ) => self.catch_cancellation_notification(notification),\n }\n }\n }\n _ => {}\n }\n context.send_to_handler(json_rpc_message).await?;\n }\n InnerEvent::FromHttpService(SessionEvent::EstablishRequestWiseChannel {\n responder,\n }) => {\n let handle_result = self.establish_request_wise_channel().await;\n let _ = responder.send(handle_result);\n }\n InnerEvent::FromHttpService(SessionEvent::CloseRequestWiseChannel {\n id,\n responder,\n }) => {\n let _handle_result = self.tx_router.remove(&id);\n let _ = responder.send(Ok(()));\n }\n InnerEvent::FromHttpService(SessionEvent::Resume {\n last_event_id,\n responder,\n }) => {\n let handle_result = self.resume(last_event_id).await;\n let _ = responder.send(handle_result);\n }\n InnerEvent::FromHttpService(SessionEvent::Close) => {\n return Err(WorkerQuitReason::TransportClosed);\n }\n _ => {\n // ignore\n }\n }\n }\n }\n}\n\n#[derive(Debug, Clone)]\npub struct SessionConfig {\n /// the capacity of the channel for the session. 
Default is 16.\n pub channel_capacity: usize,\n /// if set, the session will be closed after this duration of inactivity.\n pub keep_alive: Option,\n}\n\nimpl SessionConfig {\n pub const DEFAULT_CHANNEL_CAPACITY: usize = 16;\n}\n\nimpl Default for SessionConfig {\n fn default() -> Self {\n Self {\n channel_capacity: Self::DEFAULT_CHANNEL_CAPACITY,\n keep_alive: None,\n }\n }\n}\n\n/// Create a new session with the given id and configuration.\n///\n/// This function will return a pair of [`LocalSessionHandle`] and [`LocalSessionWorker`].\n///\n/// You can run the [`LocalSessionWorker`] as a transport for mcp server. And use the [`LocalSessionHandle`] operate the session.\npub fn create_local_session(\n id: impl Into,\n config: SessionConfig,\n) -> (LocalSessionHandle, LocalSessionWorker) {\n let id = id.into();\n let (event_tx, event_rx) = tokio::sync::mpsc::channel(config.channel_capacity);\n let (common_tx, _) = tokio::sync::mpsc::channel(config.channel_capacity);\n let common = CachedTx::new_common(common_tx);\n tracing::info!(session_id = ?id, \"create new session\");\n let handle = LocalSessionHandle {\n event_tx,\n id: id.clone(),\n };\n let session_worker = LocalSessionWorker {\n next_http_request_id: 0,\n id,\n tx_router: HashMap::new(),\n resource_router: HashMap::new(),\n common,\n event_rx,\n session_config: config.clone(),\n };\n (handle, session_worker)\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/tower.rs", "use std::{convert::Infallible, fmt::Display, sync::Arc, time::Duration};\n\nuse bytes::Bytes;\nuse futures::{StreamExt, future::BoxFuture};\nuse http::{Method, Request, Response, header::ALLOW};\nuse http_body::Body;\nuse http_body_util::{BodyExt, Full, combinators::BoxBody};\nuse tokio_stream::wrappers::ReceiverStream;\n\nuse super::session::SessionManager;\nuse crate::{\n RoleServer,\n model::{ClientJsonRpcMessage, ClientRequest, GetExtensions},\n serve_server,\n service::serve_directly,\n transport::{\n OneshotTransport, 
TransportAdapterIdentity,\n common::{\n http_header::{\n EVENT_STREAM_MIME_TYPE, HEADER_LAST_EVENT_ID, HEADER_SESSION_ID, JSON_MIME_TYPE,\n },\n server_side_http::{\n BoxResponse, ServerSseMessage, accepted_response, expect_json,\n internal_error_response, sse_stream_response, unexpected_message_response,\n },\n },\n },\n};\n\n#[derive(Debug, Clone)]\npub struct StreamableHttpServerConfig {\n /// The ping message duration for SSE connections.\n pub sse_keep_alive: Option,\n /// If true, the server will create a session for each request and keep it alive.\n pub stateful_mode: bool,\n}\n\nimpl Default for StreamableHttpServerConfig {\n fn default() -> Self {\n Self {\n sse_keep_alive: Some(Duration::from_secs(15)),\n stateful_mode: true,\n }\n }\n}\n\npub struct StreamableHttpService {\n pub config: StreamableHttpServerConfig,\n session_manager: Arc,\n service_factory: Arc Result + Send + Sync>,\n}\n\nimpl Clone for StreamableHttpService {\n fn clone(&self) -> Self {\n Self {\n config: self.config.clone(),\n session_manager: self.session_manager.clone(),\n service_factory: self.service_factory.clone(),\n }\n }\n}\n\nimpl tower_service::Service> for StreamableHttpService\nwhere\n RequestBody: Body + Send + 'static,\n S: crate::Service,\n M: SessionManager,\n RequestBody::Error: Display,\n RequestBody::Data: Send + 'static,\n{\n type Response = BoxResponse;\n type Error = Infallible;\n type Future = BoxFuture<'static, Result>;\n fn call(&mut self, req: http::Request) -> Self::Future {\n let service = self.clone();\n Box::pin(async move {\n let response = service.handle(req).await;\n Ok(response)\n })\n }\n fn poll_ready(\n &mut self,\n _cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n std::task::Poll::Ready(Ok(()))\n }\n}\n\nimpl StreamableHttpService\nwhere\n S: crate::Service + Send + 'static,\n M: SessionManager,\n{\n pub fn new(\n service_factory: impl Fn() -> Result + Send + Sync + 'static,\n session_manager: Arc,\n config: 
StreamableHttpServerConfig,\n ) -> Self {\n Self {\n config,\n session_manager,\n service_factory: Arc::new(service_factory),\n }\n }\n fn get_service(&self) -> Result {\n (self.service_factory)()\n }\n pub async fn handle(&self, request: Request) -> Response>\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n let method = request.method().clone();\n let result = match method {\n Method::GET => self.handle_get(request).await,\n Method::POST => self.handle_post(request).await,\n Method::DELETE => self.handle_delete(request).await,\n _ => {\n // Handle other methods or return an error\n let response = Response::builder()\n .status(http::StatusCode::METHOD_NOT_ALLOWED)\n .header(ALLOW, \"GET, POST, DELETE\")\n .body(Full::new(Bytes::from(\"Method Not Allowed\")).boxed())\n .expect(\"valid response\");\n return response;\n }\n };\n match result {\n Ok(response) => response,\n Err(response) => response,\n }\n }\n async fn handle_get(&self, request: Request) -> Result\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n // check accept header\n if !request\n .headers()\n .get(http::header::ACCEPT)\n .and_then(|header| header.to_str().ok())\n .is_some_and(|header| header.contains(EVENT_STREAM_MIME_TYPE))\n {\n return Ok(Response::builder()\n .status(http::StatusCode::NOT_ACCEPTABLE)\n .body(\n Full::new(Bytes::from(\n \"Not Acceptable: Client must accept text/event-stream\",\n ))\n .boxed(),\n )\n .expect(\"valid response\"));\n }\n // check session id\n let session_id = request\n .headers()\n .get(HEADER_SESSION_ID)\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_owned().into());\n let Some(session_id) = session_id else {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session ID is required\")).boxed())\n .expect(\"valid response\"));\n };\n // check if session exists\n let has_session = self\n .session_manager\n .has_session(&session_id)\n .await\n 
.map_err(internal_error_response(\"check session\"))?;\n if !has_session {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session not found\")).boxed())\n .expect(\"valid response\"));\n }\n // check if last event id is provided\n let last_event_id = request\n .headers()\n .get(HEADER_LAST_EVENT_ID)\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_owned());\n if let Some(last_event_id) = last_event_id {\n // check if session has this event id\n let stream = self\n .session_manager\n .resume(&session_id, last_event_id)\n .await\n .map_err(internal_error_response(\"resume session\"))?;\n Ok(sse_stream_response(stream, self.config.sse_keep_alive))\n } else {\n // create standalone stream\n let stream = self\n .session_manager\n .create_standalone_stream(&session_id)\n .await\n .map_err(internal_error_response(\"create standalone stream\"))?;\n Ok(sse_stream_response(stream, self.config.sse_keep_alive))\n }\n }\n\n async fn handle_post(&self, request: Request) -> Result\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n // check accept header\n if !request\n .headers()\n .get(http::header::ACCEPT)\n .and_then(|header| header.to_str().ok())\n .is_some_and(|header| {\n header.contains(JSON_MIME_TYPE) && header.contains(EVENT_STREAM_MIME_TYPE)\n })\n {\n return Ok(Response::builder()\n .status(http::StatusCode::NOT_ACCEPTABLE)\n .body(Full::new(Bytes::from(\"Not Acceptable: Client must accept both application/json and text/event-stream\")).boxed())\n .expect(\"valid response\"));\n }\n\n // check content type\n if !request\n .headers()\n .get(http::header::CONTENT_TYPE)\n .and_then(|header| header.to_str().ok())\n .is_some_and(|header| header.starts_with(JSON_MIME_TYPE))\n {\n return Ok(Response::builder()\n .status(http::StatusCode::UNSUPPORTED_MEDIA_TYPE)\n .body(\n Full::new(Bytes::from(\n \"Unsupported Media Type: Content-Type must be application/json\",\n ))\n 
.boxed(),\n )\n .expect(\"valid response\"));\n }\n\n // json deserialize request body\n let (part, body) = request.into_parts();\n let mut message = match expect_json(body).await {\n Ok(message) => message,\n Err(response) => return Ok(response),\n };\n\n if self.config.stateful_mode {\n // do we have a session id?\n let session_id = part\n .headers\n .get(HEADER_SESSION_ID)\n .and_then(|v| v.to_str().ok());\n if let Some(session_id) = session_id {\n let session_id = session_id.to_owned().into();\n let has_session = self\n .session_manager\n .has_session(&session_id)\n .await\n .map_err(internal_error_response(\"check session\"))?;\n if !has_session {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session not found\")).boxed())\n .expect(\"valid response\"));\n }\n\n // inject request part to extensions\n match &mut message {\n ClientJsonRpcMessage::Request(req) => {\n req.request.extensions_mut().insert(part);\n }\n ClientJsonRpcMessage::Notification(not) => {\n not.notification.extensions_mut().insert(part);\n }\n _ => {\n // skip\n }\n }\n\n match message {\n ClientJsonRpcMessage::Request(_) => {\n let stream = self\n .session_manager\n .create_stream(&session_id, message)\n .await\n .map_err(internal_error_response(\"get session\"))?;\n Ok(sse_stream_response(stream, self.config.sse_keep_alive))\n }\n ClientJsonRpcMessage::Notification(_)\n | ClientJsonRpcMessage::Response(_)\n | ClientJsonRpcMessage::Error(_) => {\n // handle notification\n self.session_manager\n .accept_message(&session_id, message)\n .await\n .map_err(internal_error_response(\"accept message\"))?;\n Ok(accepted_response())\n }\n _ => Ok(Response::builder()\n .status(http::StatusCode::NOT_IMPLEMENTED)\n .body(\n Full::new(Bytes::from(\"Batch requests are not supported yet\")).boxed(),\n )\n .expect(\"valid response\")),\n }\n } else {\n let (session_id, transport) = self\n .session_manager\n 
.create_session()\n .await\n .map_err(internal_error_response(\"create session\"))?;\n if let ClientJsonRpcMessage::Request(req) = &mut message {\n if !matches!(req.request, ClientRequest::InitializeRequest(_)) {\n return Err(unexpected_message_response(\"initialize request\"));\n }\n // inject request part to extensions\n req.request.extensions_mut().insert(part);\n } else {\n return Err(unexpected_message_response(\"initialize request\"));\n }\n let service = self\n .get_service()\n .map_err(internal_error_response(\"get service\"))?;\n // spawn a task to serve the session\n tokio::spawn({\n let session_manager = self.session_manager.clone();\n let session_id = session_id.clone();\n async move {\n let service = serve_server::(\n service, transport,\n )\n .await;\n match service {\n Ok(service) => {\n // on service created\n let _ = service.waiting().await;\n }\n Err(e) => {\n tracing::error!(\"Failed to create service: {e}\");\n }\n }\n let _ = session_manager\n .close_session(&session_id)\n .await\n .inspect_err(|e| {\n tracing::error!(\"Failed to close session {session_id}: {e}\");\n });\n }\n });\n // get initialize response\n let response = self\n .session_manager\n .initialize_session(&session_id, message)\n .await\n .map_err(internal_error_response(\"create stream\"))?;\n let mut response = sse_stream_response(\n futures::stream::once({\n async move {\n ServerSseMessage {\n event_id: None,\n message: response.into(),\n }\n }\n }),\n self.config.sse_keep_alive,\n );\n\n response.headers_mut().insert(\n HEADER_SESSION_ID,\n session_id\n .parse()\n .map_err(internal_error_response(\"create session id header\"))?,\n );\n Ok(response)\n }\n } else {\n let service = self\n .get_service()\n .map_err(internal_error_response(\"get service\"))?;\n match message {\n ClientJsonRpcMessage::Request(mut request) => {\n request.request.extensions_mut().insert(part);\n let (transport, receiver) =\n OneshotTransport::::new(ClientJsonRpcMessage::Request(request));\n let 
service = serve_directly(service, transport, None);\n tokio::spawn(async move {\n // on service created\n let _ = service.waiting().await;\n });\n Ok(sse_stream_response(\n ReceiverStream::new(receiver).map(|message| {\n tracing::info!(?message);\n ServerSseMessage {\n event_id: None,\n message: message.into(),\n }\n }),\n self.config.sse_keep_alive,\n ))\n }\n ClientJsonRpcMessage::Notification(_notification) => {\n // ignore\n Ok(accepted_response())\n }\n ClientJsonRpcMessage::Response(_json_rpc_response) => Ok(accepted_response()),\n ClientJsonRpcMessage::Error(_json_rpc_error) => Ok(accepted_response()),\n _ => Ok(Response::builder()\n .status(http::StatusCode::NOT_IMPLEMENTED)\n .body(Full::new(Bytes::from(\"Batch requests are not supported yet\")).boxed())\n .expect(\"valid response\")),\n }\n }\n }\n\n async fn handle_delete(&self, request: Request) -> Result\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n // check session id\n let session_id = request\n .headers()\n .get(HEADER_SESSION_ID)\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_owned().into());\n let Some(session_id) = session_id else {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session ID is required\")).boxed())\n .expect(\"valid response\"));\n };\n // close session\n self.session_manager\n .close_session(&session_id)\n .await\n .map_err(internal_error_response(\"close session\"))?;\n Ok(accepted_response())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/service.rs", "use futures::{FutureExt, future::BoxFuture};\nuse thiserror::Error;\n\nuse crate::{\n error::ErrorData as McpError,\n model::{\n CancelledNotification, CancelledNotificationParam, Extensions, GetExtensions, GetMeta,\n JsonRpcBatchRequestItem, JsonRpcBatchResponseItem, JsonRpcError, JsonRpcMessage,\n JsonRpcNotification, JsonRpcRequest, JsonRpcResponse, Meta, NumberOrString, ProgressToken,\n RequestId, ServerJsonRpcMessage,\n 
},\n transport::{DynamicTransportError, IntoTransport, Transport},\n};\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\nmod client;\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub use client::*;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\nmod server;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub use server::*;\n#[cfg(feature = \"tower\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"tower\")))]\nmod tower;\nuse tokio_util::sync::{CancellationToken, DropGuard};\n#[cfg(feature = \"tower\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"tower\")))]\npub use tower::*;\nuse tracing::instrument;\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum ServiceError {\n #[error(\"Mcp error: {0}\")]\n McpError(McpError),\n #[error(\"Transport send error: {0}\")]\n TransportSend(DynamicTransportError),\n #[error(\"Transport closed\")]\n TransportClosed,\n #[error(\"Unexpected response type\")]\n UnexpectedResponse,\n #[error(\"task cancelled for reason {}\", reason.as_deref().unwrap_or(\"\"))]\n Cancelled { reason: Option },\n #[error(\"request timeout after {}\", chrono::Duration::from_std(*timeout).unwrap_or_default())]\n Timeout { timeout: Duration },\n}\n\ntrait TransferObject:\n std::fmt::Debug + Clone + serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static\n{\n}\n\nimpl TransferObject for T where\n T: std::fmt::Debug\n + serde::Serialize\n + serde::de::DeserializeOwned\n + Send\n + Sync\n + 'static\n + Clone\n{\n}\n\n#[allow(private_bounds, reason = \"there's no the third implementation\")]\npub trait ServiceRole: std::fmt::Debug + Send + Sync + 'static + Copy + Clone {\n type Req: TransferObject + GetMeta + GetExtensions;\n type Resp: TransferObject;\n type Not: TryInto\n + From\n + TransferObject;\n type PeerReq: TransferObject + GetMeta + GetExtensions;\n type PeerResp: TransferObject;\n type PeerNot: 
TryInto\n + From\n + TransferObject\n + GetMeta\n + GetExtensions;\n type InitializeError;\n const IS_CLIENT: bool;\n type Info: TransferObject;\n type PeerInfo: TransferObject;\n}\n\npub type TxJsonRpcMessage =\n JsonRpcMessage<::Req, ::Resp, ::Not>;\npub type RxJsonRpcMessage = JsonRpcMessage<\n ::PeerReq,\n ::PeerResp,\n ::PeerNot,\n>;\n\npub trait Service: Send + Sync + 'static {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> impl Future> + Send + '_;\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> impl Future> + Send + '_;\n fn get_info(&self) -> R::Info;\n}\n\npub trait ServiceExt: Service + Sized {\n /// Convert this service to a dynamic boxed service\n ///\n /// This could be very helpful when you want to store the services in a collection\n fn into_dyn(self) -> Box> {\n Box::new(self)\n }\n fn serve(\n self,\n transport: T,\n ) -> impl Future, R::InitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized,\n {\n Self::serve_with_ct(self, transport, Default::default())\n }\n fn serve_with_ct(\n self,\n transport: T,\n ct: CancellationToken,\n ) -> impl Future, R::InitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized;\n}\n\nimpl Service for Box> {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n DynService::handle_request(self.as_ref(), request, context)\n }\n\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> impl Future> + Send + '_ {\n DynService::handle_notification(self.as_ref(), notification, context)\n }\n\n fn get_info(&self) -> R::Info {\n DynService::get_info(self.as_ref())\n }\n}\n\npub trait DynService: Send + Sync {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> BoxFuture>;\n fn 
handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> BoxFuture>;\n fn get_info(&self) -> R::Info;\n}\n\nimpl> DynService for S {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> BoxFuture> {\n Box::pin(self.handle_request(request, context))\n }\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> BoxFuture> {\n Box::pin(self.handle_notification(notification, context))\n }\n fn get_info(&self) -> R::Info {\n self.get_info()\n }\n}\n\nuse std::{\n collections::{HashMap, VecDeque},\n ops::Deref,\n sync::{Arc, atomic::AtomicU32},\n time::Duration,\n};\n\nuse tokio::sync::mpsc;\n\npub trait RequestIdProvider: Send + Sync + 'static {\n fn next_request_id(&self) -> RequestId;\n}\n\npub trait ProgressTokenProvider: Send + Sync + 'static {\n fn next_progress_token(&self) -> ProgressToken;\n}\n\npub type AtomicU32RequestIdProvider = AtomicU32Provider;\npub type AtomicU32ProgressTokenProvider = AtomicU32Provider;\n\n#[derive(Debug, Default)]\npub struct AtomicU32Provider {\n id: AtomicU32,\n}\n\nimpl RequestIdProvider for AtomicU32Provider {\n fn next_request_id(&self) -> RequestId {\n RequestId::Number(self.id.fetch_add(1, std::sync::atomic::Ordering::SeqCst))\n }\n}\n\nimpl ProgressTokenProvider for AtomicU32Provider {\n fn next_progress_token(&self) -> ProgressToken {\n ProgressToken(NumberOrString::Number(\n self.id.fetch_add(1, std::sync::atomic::Ordering::SeqCst),\n ))\n }\n}\n\ntype Responder = tokio::sync::oneshot::Sender;\n\n/// A handle to a remote request\n///\n/// You can cancel it by call [`RequestHandle::cancel`] with a reason,\n///\n/// or wait for response by call [`RequestHandle::await_response`]\n#[derive(Debug)]\npub struct RequestHandle {\n pub rx: tokio::sync::oneshot::Receiver>,\n pub options: PeerRequestOptions,\n pub peer: Peer,\n pub id: RequestId,\n pub progress_token: ProgressToken,\n}\n\nimpl RequestHandle {\n pub const 
REQUEST_TIMEOUT_REASON: &str = \"request timeout\";\n pub async fn await_response(self) -> Result {\n if let Some(timeout) = self.options.timeout {\n let timeout_result = tokio::time::timeout(timeout, async move {\n self.rx.await.map_err(|_e| ServiceError::TransportClosed)?\n })\n .await;\n match timeout_result {\n Ok(response) => response,\n Err(_) => {\n let error = Err(ServiceError::Timeout { timeout });\n // cancel this request\n let notification = CancelledNotification {\n params: CancelledNotificationParam {\n request_id: self.id,\n reason: Some(Self::REQUEST_TIMEOUT_REASON.to_owned()),\n },\n method: crate::model::CancelledNotificationMethod,\n extensions: Default::default(),\n };\n let _ = self.peer.send_notification(notification.into()).await;\n error\n }\n }\n } else {\n self.rx.await.map_err(|_e| ServiceError::TransportClosed)?\n }\n }\n\n /// Cancel this request\n pub async fn cancel(self, reason: Option) -> Result<(), ServiceError> {\n let notification = CancelledNotification {\n params: CancelledNotificationParam {\n request_id: self.id,\n reason,\n },\n method: crate::model::CancelledNotificationMethod,\n extensions: Default::default(),\n };\n self.peer.send_notification(notification.into()).await?;\n Ok(())\n }\n}\n\n#[derive(Debug)]\npub(crate) enum PeerSinkMessage {\n Request {\n request: R::Req,\n id: RequestId,\n responder: Responder>,\n },\n Notification {\n notification: R::Not,\n responder: Responder>,\n },\n}\n\n/// An interface to fetch the remote client or server\n///\n/// For general purpose, call [`Peer::send_request`] or [`Peer::send_notification`] to send message to remote peer.\n///\n/// To create a cancellable request, call [`Peer::send_request_with_option`].\n#[derive(Clone)]\npub struct Peer {\n tx: mpsc::Sender>,\n request_id_provider: Arc,\n progress_token_provider: Arc,\n info: Arc>,\n}\n\nimpl std::fmt::Debug for Peer {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"PeerSink\")\n 
.field(\"tx\", &self.tx)\n .field(\"is_client\", &R::IS_CLIENT)\n .finish()\n }\n}\n\ntype ProxyOutbound = mpsc::Receiver>;\n\n#[derive(Debug, Default)]\npub struct PeerRequestOptions {\n pub timeout: Option,\n pub meta: Option,\n}\n\nimpl PeerRequestOptions {\n pub fn no_options() -> Self {\n Self::default()\n }\n}\n\nimpl Peer {\n const CLIENT_CHANNEL_BUFFER_SIZE: usize = 1024;\n pub(crate) fn new(\n request_id_provider: Arc,\n peer_info: Option,\n ) -> (Peer, ProxyOutbound) {\n let (tx, rx) = mpsc::channel(Self::CLIENT_CHANNEL_BUFFER_SIZE);\n (\n Self {\n tx,\n request_id_provider,\n progress_token_provider: Arc::new(AtomicU32ProgressTokenProvider::default()),\n info: Arc::new(tokio::sync::OnceCell::new_with(peer_info)),\n },\n rx,\n )\n }\n pub async fn send_notification(&self, notification: R::Not) -> Result<(), ServiceError> {\n let (responder, receiver) = tokio::sync::oneshot::channel();\n self.tx\n .send(PeerSinkMessage::Notification {\n notification,\n responder,\n })\n .await\n .map_err(|_m| ServiceError::TransportClosed)?;\n receiver.await.map_err(|_e| ServiceError::TransportClosed)?\n }\n pub async fn send_request(&self, request: R::Req) -> Result {\n self.send_request_with_option(request, PeerRequestOptions::no_options())\n .await?\n .await_response()\n .await\n }\n\n pub async fn send_cancellable_request(\n &self,\n request: R::Req,\n options: PeerRequestOptions,\n ) -> Result, ServiceError> {\n self.send_request_with_option(request, options).await\n }\n\n pub async fn send_request_with_option(\n &self,\n mut request: R::Req,\n options: PeerRequestOptions,\n ) -> Result, ServiceError> {\n let id = self.request_id_provider.next_request_id();\n let progress_token = self.progress_token_provider.next_progress_token();\n request\n .get_meta_mut()\n .set_progress_token(progress_token.clone());\n if let Some(meta) = options.meta.clone() {\n request.get_meta_mut().extend(meta);\n }\n let (responder, receiver) = tokio::sync::oneshot::channel();\n self.tx\n 
.send(PeerSinkMessage::Request {\n request,\n id: id.clone(),\n responder,\n })\n .await\n .map_err(|_m| ServiceError::TransportClosed)?;\n Ok(RequestHandle {\n id,\n rx: receiver,\n progress_token,\n options,\n peer: self.clone(),\n })\n }\n pub fn peer_info(&self) -> Option<&R::PeerInfo> {\n self.info.get()\n }\n\n pub fn set_peer_info(&self, info: R::PeerInfo) {\n if self.info.initialized() {\n tracing::warn!(\"trying to set peer info, which is already initialized\");\n } else {\n let _ = self.info.set(info);\n }\n }\n\n pub fn is_transport_closed(&self) -> bool {\n self.tx.is_closed()\n }\n}\n\n#[derive(Debug)]\npub struct RunningService> {\n service: Arc,\n peer: Peer,\n handle: tokio::task::JoinHandle,\n cancellation_token: CancellationToken,\n dg: DropGuard,\n}\nimpl> Deref for RunningService {\n type Target = Peer;\n\n fn deref(&self) -> &Self::Target {\n &self.peer\n }\n}\n\nimpl> RunningService {\n #[inline]\n pub fn peer(&self) -> &Peer {\n &self.peer\n }\n #[inline]\n pub fn service(&self) -> &S {\n self.service.as_ref()\n }\n #[inline]\n pub fn cancellation_token(&self) -> RunningServiceCancellationToken {\n RunningServiceCancellationToken(self.cancellation_token.clone())\n }\n #[inline]\n pub async fn waiting(self) -> Result {\n self.handle.await\n }\n pub async fn cancel(self) -> Result {\n let RunningService { dg, handle, .. 
} = self;\n dg.disarm().cancel();\n handle.await\n }\n}\n\n// use a wrapper type so we can tweak the implementation if needed\npub struct RunningServiceCancellationToken(CancellationToken);\n\nimpl RunningServiceCancellationToken {\n pub fn cancel(self) {\n self.0.cancel();\n }\n}\n\n#[derive(Debug)]\npub enum QuitReason {\n Cancelled,\n Closed,\n JoinError(tokio::task::JoinError),\n}\n\n/// Request execution context\n#[derive(Debug, Clone)]\npub struct RequestContext {\n /// this token will be cancelled when the [`CancelledNotification`] is received.\n pub ct: CancellationToken,\n pub id: RequestId,\n pub meta: Meta,\n pub extensions: Extensions,\n /// An interface to fetch the remote client or server\n pub peer: Peer,\n}\n\n/// Request execution context\n#[derive(Debug, Clone)]\npub struct NotificationContext {\n pub meta: Meta,\n pub extensions: Extensions,\n /// An interface to fetch the remote client or server\n pub peer: Peer,\n}\n\n/// Use this function to skip initialization process\npub fn serve_directly(\n service: S,\n transport: T,\n peer_info: Option,\n) -> RunningService\nwhere\n R: ServiceRole,\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n serve_directly_with_ct(service, transport, peer_info, Default::default())\n}\n\n/// Use this function to skip initialization process\npub fn serve_directly_with_ct(\n service: S,\n transport: T,\n peer_info: Option,\n ct: CancellationToken,\n) -> RunningService\nwhere\n R: ServiceRole,\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n let (peer, peer_rx) = Peer::new(Arc::new(AtomicU32RequestIdProvider::default()), peer_info);\n serve_inner(service, transport.into_transport(), peer, peer_rx, ct)\n}\n\n#[instrument(skip_all)]\nfn serve_inner(\n service: S,\n transport: T,\n peer: Peer,\n mut peer_rx: tokio::sync::mpsc::Receiver>,\n ct: CancellationToken,\n) -> RunningService\nwhere\n R: ServiceRole,\n S: Service,\n T: Transport + 
'static,\n{\n const SINK_PROXY_BUFFER_SIZE: usize = 64;\n let (sink_proxy_tx, mut sink_proxy_rx) =\n tokio::sync::mpsc::channel::>(SINK_PROXY_BUFFER_SIZE);\n let peer_info = peer.peer_info();\n if R::IS_CLIENT {\n tracing::info!(?peer_info, \"Service initialized as client\");\n } else {\n tracing::info!(?peer_info, \"Service initialized as server\");\n }\n\n let mut local_responder_pool =\n HashMap::>>::new();\n let mut local_ct_pool = HashMap::::new();\n let shared_service = Arc::new(service);\n // for return\n let service = shared_service.clone();\n\n // let message_sink = tokio::sync::\n // let mut stream = std::pin::pin!(stream);\n let serve_loop_ct = ct.child_token();\n let peer_return: Peer = peer.clone();\n let handle = tokio::spawn(async move {\n let mut transport = transport.into_transport();\n let mut batch_messages = VecDeque::>::new();\n let mut send_task_set = tokio::task::JoinSet::::new();\n #[derive(Debug)]\n enum SendTaskResult {\n Request {\n id: RequestId,\n result: Result<(), DynamicTransportError>,\n },\n Notification {\n responder: Responder>,\n cancellation_param: Option,\n result: Result<(), DynamicTransportError>,\n },\n }\n #[derive(Debug)]\n enum Event {\n ProxyMessage(PeerSinkMessage),\n PeerMessage(RxJsonRpcMessage),\n ToSink(TxJsonRpcMessage),\n SendTaskResult(SendTaskResult),\n }\n\n let quit_reason = loop {\n let evt = if let Some(m) = batch_messages.pop_front() {\n Event::PeerMessage(m)\n } else {\n tokio::select! 
{\n m = sink_proxy_rx.recv(), if !sink_proxy_rx.is_closed() => {\n if let Some(m) = m {\n Event::ToSink(m)\n } else {\n continue\n }\n }\n m = transport.receive() => {\n if let Some(m) = m {\n Event::PeerMessage(m)\n } else {\n // input stream closed\n tracing::info!(\"input stream terminated\");\n break QuitReason::Closed\n }\n }\n m = peer_rx.recv(), if !peer_rx.is_closed() => {\n if let Some(m) = m {\n Event::ProxyMessage(m)\n } else {\n continue\n }\n }\n m = send_task_set.join_next(), if !send_task_set.is_empty() => {\n let Some(result) = m else {\n continue\n };\n match result {\n Err(e) => {\n // join error, which is serious, we should quit.\n tracing::error!(%e, \"send request task encounter a tokio join error\");\n break QuitReason::JoinError(e)\n }\n Ok(result) => {\n Event::SendTaskResult(result)\n }\n }\n }\n _ = serve_loop_ct.cancelled() => {\n tracing::info!(\"task cancelled\");\n break QuitReason::Cancelled\n }\n }\n };\n\n tracing::trace!(?evt, \"new event\");\n match evt {\n Event::SendTaskResult(SendTaskResult::Request { id, result }) => {\n if let Err(e) = result {\n if let Some(responder) = local_responder_pool.remove(&id) {\n let _ = responder.send(Err(ServiceError::TransportSend(e)));\n }\n }\n }\n Event::SendTaskResult(SendTaskResult::Notification {\n responder,\n result,\n cancellation_param,\n }) => {\n let response = if let Err(e) = result {\n Err(ServiceError::TransportSend(e))\n } else {\n Ok(())\n };\n let _ = responder.send(response);\n if let Some(param) = cancellation_param {\n if let Some(responder) = local_responder_pool.remove(¶m.request_id) {\n tracing::info!(id = %param.request_id, reason = param.reason, \"cancelled\");\n let _response_result = responder.send(Err(ServiceError::Cancelled {\n reason: param.reason.clone(),\n }));\n }\n }\n }\n // response and error\n Event::ToSink(m) => {\n if let Some(id) = match &m {\n JsonRpcMessage::Response(response) => Some(&response.id),\n JsonRpcMessage::Error(error) => Some(&error.id),\n _ 
=> None,\n } {\n if let Some(ct) = local_ct_pool.remove(id) {\n ct.cancel();\n }\n let send = transport.send(m);\n tokio::spawn(async move {\n let send_result = send.await;\n if let Err(error) = send_result {\n tracing::error!(%error, \"fail to response message\");\n }\n });\n }\n }\n Event::ProxyMessage(PeerSinkMessage::Request {\n request,\n id,\n responder,\n }) => {\n local_responder_pool.insert(id.clone(), responder);\n let send = transport.send(JsonRpcMessage::request(request, id.clone()));\n {\n let id = id.clone();\n send_task_set.spawn(send.map(move |r| SendTaskResult::Request {\n id,\n result: r.map_err(DynamicTransportError::new::),\n }));\n }\n }\n Event::ProxyMessage(PeerSinkMessage::Notification {\n notification,\n responder,\n }) => {\n // catch cancellation notification\n let mut cancellation_param = None;\n let notification = match notification.try_into() {\n Ok::(cancelled) => {\n cancellation_param.replace(cancelled.params.clone());\n cancelled.into()\n }\n Err(notification) => notification,\n };\n let send = transport.send(JsonRpcMessage::notification(notification));\n send_task_set.spawn(send.map(move |result| SendTaskResult::Notification {\n responder,\n cancellation_param,\n result: result.map_err(DynamicTransportError::new::),\n }));\n }\n Event::PeerMessage(JsonRpcMessage::Request(JsonRpcRequest {\n id,\n mut request,\n ..\n })) => {\n tracing::debug!(%id, ?request, \"received request\");\n {\n let service = shared_service.clone();\n let sink = sink_proxy_tx.clone();\n let request_ct = serve_loop_ct.child_token();\n let context_ct = request_ct.child_token();\n local_ct_pool.insert(id.clone(), request_ct);\n let mut extensions = Extensions::new();\n let mut meta = Meta::new();\n // avoid clone\n // swap meta firstly, otherwise progress token will be lost\n std::mem::swap(&mut meta, request.get_meta_mut());\n std::mem::swap(&mut extensions, request.extensions_mut());\n let context = RequestContext {\n ct: context_ct,\n id: id.clone(),\n peer: 
peer.clone(),\n meta,\n extensions,\n };\n tokio::spawn(async move {\n let result = service.handle_request(request, context).await;\n let response = match result {\n Ok(result) => {\n tracing::debug!(%id, ?result, \"response message\");\n JsonRpcMessage::response(result, id)\n }\n Err(error) => {\n tracing::warn!(%id, ?error, \"response error\");\n JsonRpcMessage::error(error, id)\n }\n };\n let _send_result = sink.send(response).await;\n });\n }\n }\n Event::PeerMessage(JsonRpcMessage::Notification(JsonRpcNotification {\n notification,\n ..\n })) => {\n tracing::info!(?notification, \"received notification\");\n // catch cancelled notification\n let mut notification = match notification.try_into() {\n Ok::(cancelled) => {\n if let Some(ct) = local_ct_pool.remove(&cancelled.params.request_id) {\n tracing::info!(id = %cancelled.params.request_id, reason = cancelled.params.reason, \"cancelled\");\n ct.cancel();\n }\n cancelled.into()\n }\n Err(notification) => notification,\n };\n {\n let service = shared_service.clone();\n let mut extensions = Extensions::new();\n let mut meta = Meta::new();\n // avoid clone\n std::mem::swap(&mut extensions, notification.extensions_mut());\n std::mem::swap(&mut meta, notification.get_meta_mut());\n let context = NotificationContext {\n peer: peer.clone(),\n meta,\n extensions,\n };\n tokio::spawn(async move {\n let result = service.handle_notification(notification, context).await;\n if let Err(error) = result {\n tracing::warn!(%error, \"Error sending notification\");\n }\n });\n }\n }\n Event::PeerMessage(JsonRpcMessage::Response(JsonRpcResponse {\n result,\n id,\n ..\n })) => {\n if let Some(responder) = local_responder_pool.remove(&id) {\n let response_result = responder.send(Ok(result));\n if let Err(_error) = response_result {\n tracing::warn!(%id, \"Error sending response\");\n }\n }\n }\n Event::PeerMessage(JsonRpcMessage::Error(JsonRpcError { error, id, .. 
})) => {\n if let Some(responder) = local_responder_pool.remove(&id) {\n let _response_result = responder.send(Err(ServiceError::McpError(error)));\n if let Err(_error) = _response_result {\n tracing::warn!(%id, \"Error sending response\");\n }\n }\n }\n Event::PeerMessage(JsonRpcMessage::BatchRequest(batch)) => {\n batch_messages.extend(\n batch\n .into_iter()\n .map(JsonRpcBatchRequestItem::into_non_batch_message),\n );\n }\n Event::PeerMessage(JsonRpcMessage::BatchResponse(batch)) => {\n batch_messages.extend(\n batch\n .into_iter()\n .map(JsonRpcBatchResponseItem::into_non_batch_message),\n );\n }\n }\n };\n let sink_close_result = transport.close().await;\n if let Err(e) = sink_close_result {\n tracing::error!(%e, \"fail to close sink\");\n }\n tracing::info!(?quit_reason, \"serve finished\");\n quit_reason\n });\n RunningService {\n service,\n peer: peer_return,\n handle,\n cancellation_token: ct.clone(),\n dg: ct.drop_guard(),\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/sse_server.rs", "use std::{collections::HashMap, io, net::SocketAddr, sync::Arc, time::Duration};\n\nuse axum::{\n Extension, Json, Router,\n extract::{NestedPath, Query, State},\n http::{StatusCode, request::Parts},\n response::{\n Response,\n sse::{Event, KeepAlive, Sse},\n },\n routing::{get, post},\n};\nuse futures::{Sink, SinkExt, Stream};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tokio_util::sync::{CancellationToken, PollSender};\nuse tracing::Instrument;\n\nuse crate::{\n RoleServer, Service,\n model::ClientJsonRpcMessage,\n service::{RxJsonRpcMessage, TxJsonRpcMessage, serve_directly_with_ct},\n transport::common::server_side_http::{DEFAULT_AUTO_PING_INTERVAL, SessionId, session_id},\n};\n\ntype TxStore =\n Arc>>>;\npub type TransportReceiver = ReceiverStream>;\n\n#[derive(Clone)]\nstruct App {\n txs: TxStore,\n transport_tx: tokio::sync::mpsc::UnboundedSender,\n post_path: Arc,\n sse_ping_interval: Duration,\n}\n\nimpl App {\n pub fn new(\n post_path: String,\n 
sse_ping_interval: Duration,\n ) -> (\n Self,\n tokio::sync::mpsc::UnboundedReceiver,\n ) {\n let (transport_tx, transport_rx) = tokio::sync::mpsc::unbounded_channel();\n (\n Self {\n txs: Default::default(),\n transport_tx,\n post_path: post_path.into(),\n sse_ping_interval,\n },\n transport_rx,\n )\n }\n}\n\n#[derive(Debug, serde::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct PostEventQuery {\n pub session_id: String,\n}\n\nasync fn post_event_handler(\n State(app): State,\n Query(PostEventQuery { session_id }): Query,\n parts: Parts,\n Json(mut message): Json,\n) -> Result {\n tracing::debug!(session_id, ?parts, ?message, \"new client message\");\n let tx = {\n let rg = app.txs.read().await;\n rg.get(session_id.as_str())\n .ok_or(StatusCode::NOT_FOUND)?\n .clone()\n };\n message.insert_extension(parts);\n if tx.send(message).await.is_err() {\n tracing::error!(\"send message error\");\n return Err(StatusCode::GONE);\n }\n Ok(StatusCode::ACCEPTED)\n}\n\nasync fn sse_handler(\n State(app): State,\n nested_path: Option>,\n parts: Parts,\n) -> Result>>, Response> {\n let session = session_id();\n tracing::info!(%session, ?parts, \"sse connection\");\n use tokio_stream::{StreamExt, wrappers::ReceiverStream};\n use tokio_util::sync::PollSender;\n let (from_client_tx, from_client_rx) = tokio::sync::mpsc::channel(64);\n let (to_client_tx, to_client_rx) = tokio::sync::mpsc::channel(64);\n let to_client_tx_clone = to_client_tx.clone();\n\n app.txs\n .write()\n .await\n .insert(session.clone(), from_client_tx);\n let session = session.clone();\n let stream = ReceiverStream::new(from_client_rx);\n let sink = PollSender::new(to_client_tx);\n let transport = SseServerTransport {\n stream,\n sink,\n session_id: session.clone(),\n tx_store: app.txs.clone(),\n };\n let transport_send_result = app.transport_tx.send(transport);\n if transport_send_result.is_err() {\n tracing::warn!(\"send transport out error\");\n let mut response =\n Response::new(\"fail to send 
out transport, it seems server is closed\".to_string());\n *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;\n return Err(response);\n }\n let nested_path = nested_path.as_deref().map(NestedPath::as_str).unwrap_or(\"\");\n let post_path = app.post_path.as_ref();\n let ping_interval = app.sse_ping_interval;\n let stream = futures::stream::once(futures::future::ok(\n Event::default()\n .event(\"endpoint\")\n .data(format!(\"{nested_path}{post_path}?sessionId={session}\")),\n ))\n .chain(ReceiverStream::new(to_client_rx).map(|message| {\n match serde_json::to_string(&message) {\n Ok(bytes) => Ok(Event::default().event(\"message\").data(&bytes)),\n Err(e) => Err(io::Error::new(io::ErrorKind::InvalidData, e)),\n }\n }));\n\n tokio::spawn(async move {\n // Wait for connection closure\n to_client_tx_clone.closed().await;\n\n // Clean up session\n let session_id = session.clone();\n let tx_store = app.txs.clone();\n let mut txs = tx_store.write().await;\n txs.remove(&session_id);\n tracing::debug!(%session_id, \"Closed session and cleaned up resources\");\n });\n\n Ok(Sse::new(stream).keep_alive(KeepAlive::new().interval(ping_interval)))\n}\n\npub struct SseServerTransport {\n stream: ReceiverStream>,\n sink: PollSender>,\n session_id: SessionId,\n tx_store: TxStore,\n}\n\nimpl Sink> for SseServerTransport {\n type Error = io::Error;\n\n fn poll_ready(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.sink\n .poll_ready_unpin(cx)\n .map_err(std::io::Error::other)\n }\n\n fn start_send(\n mut self: std::pin::Pin<&mut Self>,\n item: TxJsonRpcMessage,\n ) -> Result<(), Self::Error> {\n self.sink\n .start_send_unpin(item)\n .map_err(std::io::Error::other)\n }\n\n fn poll_flush(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.sink\n .poll_flush_unpin(cx)\n .map_err(std::io::Error::other)\n }\n\n fn poll_close(\n mut self: std::pin::Pin<&mut Self>,\n cx: 
&mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n let inner_close_result = self\n .sink\n .poll_close_unpin(cx)\n .map_err(std::io::Error::other);\n if inner_close_result.is_ready() {\n let session_id = self.session_id.clone();\n let tx_store = self.tx_store.clone();\n tokio::spawn(async move {\n tx_store.write().await.remove(&session_id);\n });\n }\n inner_close_result\n }\n}\n\nimpl Stream for SseServerTransport {\n type Item = RxJsonRpcMessage;\n\n fn poll_next(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n use futures::StreamExt;\n self.stream.poll_next_unpin(cx)\n }\n}\n\n#[derive(Debug, Clone)]\npub struct SseServerConfig {\n pub bind: SocketAddr,\n pub sse_path: String,\n pub post_path: String,\n pub ct: CancellationToken,\n pub sse_keep_alive: Option,\n}\n\n#[derive(Debug)]\npub struct SseServer {\n transport_rx: tokio::sync::mpsc::UnboundedReceiver,\n pub config: SseServerConfig,\n}\n\nimpl SseServer {\n pub async fn serve(bind: SocketAddr) -> io::Result {\n Self::serve_with_config(SseServerConfig {\n bind,\n sse_path: \"/sse\".to_string(),\n post_path: \"/message\".to_string(),\n ct: CancellationToken::new(),\n sse_keep_alive: None,\n })\n .await\n }\n pub async fn serve_with_config(config: SseServerConfig) -> io::Result {\n let (sse_server, service) = Self::new(config);\n let listener = tokio::net::TcpListener::bind(sse_server.config.bind).await?;\n let ct = sse_server.config.ct.child_token();\n let server = axum::serve(listener, service).with_graceful_shutdown(async move {\n ct.cancelled().await;\n tracing::info!(\"sse server cancelled\");\n });\n tokio::spawn(\n async move {\n if let Err(e) = server.await {\n tracing::error!(error = %e, \"sse server shutdown with error\");\n }\n }\n .instrument(tracing::info_span!(\"sse-server\", bind_address = %sse_server.config.bind)),\n );\n Ok(sse_server)\n }\n\n pub fn new(config: SseServerConfig) -> (SseServer, Router) {\n let (app, transport_rx) = 
App::new(\n config.post_path.clone(),\n config.sse_keep_alive.unwrap_or(DEFAULT_AUTO_PING_INTERVAL),\n );\n let router = Router::new()\n .route(&config.sse_path, get(sse_handler))\n .route(&config.post_path, post(post_event_handler))\n .with_state(app);\n\n let server = SseServer {\n transport_rx,\n config,\n };\n\n (server, router)\n }\n\n pub fn with_service(mut self, service_provider: F) -> CancellationToken\n where\n S: Service,\n F: Fn() -> S + Send + 'static,\n {\n use crate::service::ServiceExt;\n let ct = self.config.ct.clone();\n tokio::spawn(async move {\n while let Some(transport) = self.next_transport().await {\n let service = service_provider();\n let ct = self.config.ct.child_token();\n tokio::spawn(async move {\n let server = service\n .serve_with_ct(transport, ct)\n .await\n .map_err(std::io::Error::other)?;\n server.waiting().await?;\n tokio::io::Result::Ok(())\n });\n }\n });\n ct\n }\n\n /// This allows you to skip the initialization steps for incoming request.\n pub fn with_service_directly(mut self, service_provider: F) -> CancellationToken\n where\n S: Service,\n F: Fn() -> S + Send + 'static,\n {\n let ct = self.config.ct.clone();\n tokio::spawn(async move {\n while let Some(transport) = self.next_transport().await {\n let service = service_provider();\n let ct = self.config.ct.child_token();\n tokio::spawn(async move {\n let server = serve_directly_with_ct(service, transport, None, ct);\n server.waiting().await?;\n tokio::io::Result::Ok(())\n });\n }\n });\n ct\n }\n\n pub fn cancel(&self) {\n self.config.ct.cancel();\n }\n\n pub async fn next_transport(&mut self) -> Option {\n self.transport_rx.recv().await\n }\n}\n\nimpl Stream for SseServer {\n type Item = SseServerTransport;\n\n fn poll_next(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.transport_rx.poll_recv(cx)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/reqwest/streamable_http_client.rs", "use 
std::sync::Arc;\n\nuse futures::{StreamExt, stream::BoxStream};\nuse reqwest::header::ACCEPT;\nuse sse_stream::{Sse, SseStream};\n\nuse crate::{\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::{\n common::http_header::{\n EVENT_STREAM_MIME_TYPE, HEADER_LAST_EVENT_ID, HEADER_SESSION_ID, JSON_MIME_TYPE,\n },\n streamable_http_client::*,\n },\n};\n\nimpl StreamableHttpClient for reqwest::Client {\n type Error = reqwest::Error;\n\n async fn get_stream(\n &self,\n uri: Arc,\n session_id: Arc,\n last_event_id: Option,\n auth_token: Option,\n ) -> Result>, StreamableHttpError> {\n let mut request_builder = self\n .get(uri.as_ref())\n .header(ACCEPT, EVENT_STREAM_MIME_TYPE)\n .header(HEADER_SESSION_ID, session_id.as_ref());\n if let Some(last_event_id) = last_event_id {\n request_builder = request_builder.header(HEADER_LAST_EVENT_ID, last_event_id);\n }\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n let response = request_builder.send().await?;\n if response.status() == reqwest::StatusCode::METHOD_NOT_ALLOWED {\n return Err(StreamableHttpError::SeverDoesNotSupportSse);\n }\n let response = response.error_for_status()?;\n match response.headers().get(reqwest::header::CONTENT_TYPE) {\n Some(ct) => {\n if !ct.as_bytes().starts_with(EVENT_STREAM_MIME_TYPE.as_bytes()) {\n return Err(StreamableHttpError::UnexpectedContentType(Some(\n String::from_utf8_lossy(ct.as_bytes()).to_string(),\n )));\n }\n }\n None => {\n return Err(StreamableHttpError::UnexpectedContentType(None));\n }\n }\n let event_stream = SseStream::from_byte_stream(response.bytes_stream()).boxed();\n Ok(event_stream)\n }\n\n async fn delete_session(\n &self,\n uri: Arc,\n session: Arc,\n auth_token: Option,\n ) -> Result<(), StreamableHttpError> {\n let mut request_builder = self.delete(uri.as_ref());\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n let response = 
request_builder\n .header(HEADER_SESSION_ID, session.as_ref())\n .send()\n .await?;\n\n // if method no allowed\n if response.status() == reqwest::StatusCode::METHOD_NOT_ALLOWED {\n tracing::debug!(\"this server doesn't support deleting session\");\n return Ok(());\n }\n let _response = response.error_for_status()?;\n Ok(())\n }\n\n async fn post_message(\n &self,\n uri: Arc,\n message: ClientJsonRpcMessage,\n session_id: Option>,\n auth_token: Option,\n ) -> Result> {\n let mut request = self\n .post(uri.as_ref())\n .header(ACCEPT, [EVENT_STREAM_MIME_TYPE, JSON_MIME_TYPE].join(\", \"));\n if let Some(auth_header) = auth_token {\n request = request.bearer_auth(auth_header);\n }\n if let Some(session_id) = session_id {\n request = request.header(HEADER_SESSION_ID, session_id.as_ref());\n }\n let response = request.json(&message).send().await?.error_for_status()?;\n if response.status() == reqwest::StatusCode::ACCEPTED {\n return Ok(StreamableHttpPostResponse::Accepted);\n }\n let content_type = response.headers().get(reqwest::header::CONTENT_TYPE);\n let session_id = response.headers().get(HEADER_SESSION_ID);\n let session_id = session_id\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_string());\n match content_type {\n Some(ct) if ct.as_bytes().starts_with(EVENT_STREAM_MIME_TYPE.as_bytes()) => {\n let event_stream = SseStream::from_byte_stream(response.bytes_stream()).boxed();\n Ok(StreamableHttpPostResponse::Sse(event_stream, session_id))\n }\n Some(ct) if ct.as_bytes().starts_with(JSON_MIME_TYPE.as_bytes()) => {\n let message: ServerJsonRpcMessage = response.json().await?;\n Ok(StreamableHttpPostResponse::Json(message, session_id))\n }\n _ => {\n // unexpected content type\n tracing::error!(\"unexpected content type: {:?}\", content_type);\n Err(StreamableHttpError::UnexpectedContentType(\n content_type.map(|ct| String::from_utf8_lossy(ct.as_bytes()).to_string()),\n ))\n }\n }\n }\n}\n\nimpl StreamableHttpClientTransport {\n pub fn from_uri(uri: impl Into>) -> 
Self {\n StreamableHttpClientTransport::with_client(\n reqwest::Client::default(),\n StreamableHttpClientTransportConfig {\n uri: uri.into(),\n ..Default::default()\n },\n )\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model.rs", "use std::{borrow::Cow, sync::Arc};\nmod annotated;\nmod capabilities;\nmod content;\nmod extension;\nmod meta;\nmod prompt;\nmod resource;\nmod serde_impl;\nmod tool;\npub use annotated::*;\npub use capabilities::*;\npub use content::*;\npub use extension::*;\npub use meta::*;\npub use prompt::*;\npub use resource::*;\nuse serde::{Deserialize, Serialize};\nuse serde_json::Value;\npub use tool::*;\n\n/// A JSON object type alias for convenient handling of JSON data.\n///\n/// You can use [`crate::object!`] or [`crate::model::object`] to create a json object quickly.\n/// This is commonly used for storing arbitrary JSON data in MCP messages.\npub type JsonObject = serde_json::Map;\n\n/// unwrap the JsonObject under [`serde_json::Value`]\n///\n/// # Panic\n/// This will panic when the value is not a object in debug mode.\npub fn object(value: serde_json::Value) -> JsonObject {\n debug_assert!(value.is_object());\n match value {\n serde_json::Value::Object(map) => map,\n _ => JsonObject::default(),\n }\n}\n\n/// Use this macro just like [`serde_json::json!`]\n#[cfg(feature = \"macros\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"macros\")))]\n#[macro_export]\nmacro_rules! object {\n ({$($tt:tt)*}) => {\n $crate::model::object(serde_json::json! {\n {$($tt)*}\n })\n };\n}\n\n/// This is commonly used for representing empty objects in MCP messages.\n///\n/// without returning any specific data.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Copy, Eq)]\n#[cfg_attr(feature = \"server\", derive(schemars::JsonSchema))]\npub struct EmptyObject {}\n\npub trait ConstString: Default {\n const VALUE: &str;\n}\n#[macro_export]\nmacro_rules! 
const_string {\n ($name:ident = $value:literal) => {\n #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]\n pub struct $name;\n\n impl ConstString for $name {\n const VALUE: &str = $value;\n }\n\n impl serde::Serialize for $name {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n $value.serialize(serializer)\n }\n }\n\n impl<'de> serde::Deserialize<'de> for $name {\n fn deserialize(deserializer: D) -> Result<$name, D::Error>\n where\n D: serde::Deserializer<'de>,\n {\n let s: String = serde::Deserialize::deserialize(deserializer)?;\n if s == $value {\n Ok($name)\n } else {\n Err(serde::de::Error::custom(format!(concat!(\n \"expect const string value \\\"\",\n $value,\n \"\\\"\"\n ))))\n }\n }\n }\n\n #[cfg(feature = \"schemars\")]\n impl schemars::JsonSchema for $name {\n fn schema_name() -> Cow<'static, str> {\n Cow::Borrowed(stringify!($name))\n }\n\n fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema {\n use serde_json::{Map, json};\n\n let mut schema_map = Map::new();\n schema_map.insert(\"type\".to_string(), json!(\"string\"));\n schema_map.insert(\"format\".to_string(), json!(\"const\"));\n schema_map.insert(\"const\".to_string(), json!($value));\n\n schemars::Schema::from(schema_map)\n }\n }\n };\n}\n\nconst_string!(JsonRpcVersion2_0 = \"2.0\");\n\n// =============================================================================\n// CORE PROTOCOL TYPES\n// =============================================================================\n\n/// Represents the MCP protocol version used for communication.\n///\n/// This ensures compatibility between clients and servers by specifying\n/// which version of the Model Context Protocol is being used.\n#[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ProtocolVersion(Cow<'static, str>);\n\nimpl Default for ProtocolVersion {\n fn default() -> Self {\n Self::LATEST\n }\n}\n\nimpl 
std::fmt::Display for ProtocolVersion {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n self.0.fmt(f)\n }\n}\n\nimpl ProtocolVersion {\n pub const V_2025_03_26: Self = Self(Cow::Borrowed(\"2025-03-26\"));\n pub const V_2024_11_05: Self = Self(Cow::Borrowed(\"2024-11-05\"));\n pub const LATEST: Self = Self::V_2025_03_26;\n}\n\nimpl Serialize for ProtocolVersion {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n self.0.serialize(serializer)\n }\n}\n\nimpl<'de> Deserialize<'de> for ProtocolVersion {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let s: String = Deserialize::deserialize(deserializer)?;\n #[allow(clippy::single_match)]\n match s.as_str() {\n \"2024-11-05\" => return Ok(ProtocolVersion::V_2024_11_05),\n \"2025-03-26\" => return Ok(ProtocolVersion::V_2025_03_26),\n _ => {}\n }\n Ok(ProtocolVersion(Cow::Owned(s)))\n }\n}\n\n/// A flexible identifier type that can be either a number or a string.\n///\n/// This is commonly used for request IDs and other identifiers in JSON-RPC\n/// where the specification allows both numeric and string values.\n#[derive(Debug, Clone, Eq, PartialEq, Hash)]\npub enum NumberOrString {\n /// A numeric identifier\n Number(u32),\n /// A string identifier \n String(Arc),\n}\n\nimpl NumberOrString {\n pub fn into_json_value(self) -> Value {\n match self {\n NumberOrString::Number(n) => Value::Number(serde_json::Number::from(n)),\n NumberOrString::String(s) => Value::String(s.to_string()),\n }\n }\n}\n\nimpl std::fmt::Display for NumberOrString {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n NumberOrString::Number(n) => n.fmt(f),\n NumberOrString::String(s) => s.fmt(f),\n }\n }\n}\n\nimpl Serialize for NumberOrString {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n match self {\n NumberOrString::Number(n) => n.serialize(serializer),\n 
NumberOrString::String(s) => s.serialize(serializer),\n }\n }\n}\n\nimpl<'de> Deserialize<'de> for NumberOrString {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let value: Value = Deserialize::deserialize(deserializer)?;\n match value {\n Value::Number(n) => Ok(NumberOrString::Number(\n n.as_u64()\n .ok_or(serde::de::Error::custom(\"Expect an integer\"))? as u32,\n )),\n Value::String(s) => Ok(NumberOrString::String(s.into())),\n _ => Err(serde::de::Error::custom(\"Expect number or string\")),\n }\n }\n}\n\n#[cfg(feature = \"schemars\")]\nimpl schemars::JsonSchema for NumberOrString {\n fn schema_name() -> Cow<'static, str> {\n Cow::Borrowed(\"NumberOrString\")\n }\n\n fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema {\n use serde_json::{Map, json};\n\n let mut number_schema = Map::new();\n number_schema.insert(\"type\".to_string(), json!(\"number\"));\n\n let mut string_schema = Map::new();\n string_schema.insert(\"type\".to_string(), json!(\"string\"));\n\n let mut schema_map = Map::new();\n schema_map.insert(\"oneOf\".to_string(), json!([number_schema, string_schema]));\n\n schemars::Schema::from(schema_map)\n }\n}\n\n/// Type alias for request identifiers used in JSON-RPC communication.\npub type RequestId = NumberOrString;\n\n/// A token used to track the progress of long-running operations.\n///\n/// Progress tokens allow clients and servers to associate progress notifications\n/// with specific requests, enabling real-time updates on operation status.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Hash, Eq)]\n#[serde(transparent)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ProgressToken(pub NumberOrString);\n\n// =============================================================================\n// JSON-RPC MESSAGE STRUCTURES\n// =============================================================================\n\n/// Represents a JSON-RPC request with 
method, parameters, and extensions.\n///\n/// This is the core structure for all MCP requests, containing:\n/// - `method`: The name of the method being called\n/// - `params`: The parameters for the method\n/// - `extensions`: Additional context data (similar to HTTP headers)\n#[derive(Debug, Clone, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Request {\n pub method: M,\n pub params: P,\n /// extensions will carry anything possible in the context, including [`Meta`]\n ///\n /// this is similar with the Extensions in `http` crate\n #[cfg_attr(feature = \"schemars\", schemars(skip))]\n pub extensions: Extensions,\n}\n\nimpl Request {\n pub fn new(params: P) -> Self {\n Self {\n method: Default::default(),\n params,\n extensions: Extensions::default(),\n }\n }\n}\n\nimpl GetExtensions for Request {\n fn extensions(&self) -> &Extensions {\n &self.extensions\n }\n fn extensions_mut(&mut self) -> &mut Extensions {\n &mut self.extensions\n }\n}\n\n#[derive(Debug, Clone, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RequestOptionalParam {\n pub method: M,\n // #[serde(skip_serializing_if = \"Option::is_none\")]\n pub params: Option

,\n /// extensions will carry anything possible in the context, including [`Meta`]\n ///\n /// this is similar with the Extensions in `http` crate\n #[cfg_attr(feature = \"schemars\", schemars(skip))]\n pub extensions: Extensions,\n}\n\nimpl RequestOptionalParam {\n pub fn with_param(params: P) -> Self {\n Self {\n method: Default::default(),\n params: Some(params),\n extensions: Extensions::default(),\n }\n }\n}\n\n#[derive(Debug, Clone, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RequestNoParam {\n pub method: M,\n /// extensions will carry anything possible in the context, including [`Meta`]\n ///\n /// this is similar with the Extensions in `http` crate\n #[cfg_attr(feature = \"schemars\", schemars(skip))]\n pub extensions: Extensions,\n}\n\nimpl GetExtensions for RequestNoParam {\n fn extensions(&self) -> &Extensions {\n &self.extensions\n }\n fn extensions_mut(&mut self) -> &mut Extensions {\n &mut self.extensions\n }\n}\n#[derive(Debug, Clone, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Notification {\n pub method: M,\n pub params: P,\n /// extensions will carry anything possible in the context, including [`Meta`]\n ///\n /// this is similar with the Extensions in `http` crate\n #[cfg_attr(feature = \"schemars\", schemars(skip))]\n pub extensions: Extensions,\n}\n\nimpl Notification {\n pub fn new(params: P) -> Self {\n Self {\n method: Default::default(),\n params,\n extensions: Extensions::default(),\n }\n }\n}\n\n#[derive(Debug, Clone, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct NotificationNoParam {\n pub method: M,\n /// extensions will carry anything possible in the context, including [`Meta`]\n ///\n /// this is similar with the Extensions in `http` crate\n #[cfg_attr(feature = \"schemars\", schemars(skip))]\n pub extensions: Extensions,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, 
PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct JsonRpcRequest {\n pub jsonrpc: JsonRpcVersion2_0,\n pub id: RequestId,\n #[serde(flatten)]\n pub request: R,\n}\n\ntype DefaultResponse = JsonObject;\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct JsonRpcResponse {\n pub jsonrpc: JsonRpcVersion2_0,\n pub id: RequestId,\n pub result: R,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct JsonRpcError {\n pub jsonrpc: JsonRpcVersion2_0,\n pub id: RequestId,\n pub error: ErrorData,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct JsonRpcNotification {\n pub jsonrpc: JsonRpcVersion2_0,\n #[serde(flatten)]\n pub notification: N,\n}\n\n/// Standard JSON-RPC error codes used throughout the MCP protocol.\n///\n/// These codes follow the JSON-RPC 2.0 specification and provide\n/// standardized error reporting across all MCP implementations.\n#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(transparent)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ErrorCode(pub i32);\n\nimpl ErrorCode {\n pub const RESOURCE_NOT_FOUND: Self = Self(-32002);\n pub const INVALID_REQUEST: Self = Self(-32600);\n pub const METHOD_NOT_FOUND: Self = Self(-32601);\n pub const INVALID_PARAMS: Self = Self(-32602);\n pub const INTERNAL_ERROR: Self = Self(-32603);\n pub const PARSE_ERROR: Self = Self(-32700);\n}\n\n/// Error information for JSON-RPC error responses.\n///\n/// This structure follows the JSON-RPC 2.0 specification for error reporting,\n/// providing a standardized way to communicate errors between clients and servers.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = 
\"schemars\", derive(schemars::JsonSchema))]\npub struct ErrorData {\n /// The error type that occurred (using standard JSON-RPC error codes)\n pub code: ErrorCode,\n\n /// A short description of the error. The message SHOULD be limited to a concise single sentence.\n pub message: Cow<'static, str>,\n\n /// Additional information about the error. The value of this member is defined by the\n /// sender (e.g. detailed error information, nested errors etc.).\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub data: Option,\n}\n\nimpl ErrorData {\n pub fn new(\n code: ErrorCode,\n message: impl Into>,\n data: Option,\n ) -> Self {\n Self {\n code,\n message: message.into(),\n data,\n }\n }\n pub fn resource_not_found(message: impl Into>, data: Option) -> Self {\n Self::new(ErrorCode::RESOURCE_NOT_FOUND, message, data)\n }\n pub fn parse_error(message: impl Into>, data: Option) -> Self {\n Self::new(ErrorCode::PARSE_ERROR, message, data)\n }\n pub fn invalid_request(message: impl Into>, data: Option) -> Self {\n Self::new(ErrorCode::INVALID_REQUEST, message, data)\n }\n pub fn method_not_found() -> Self {\n Self::new(ErrorCode::METHOD_NOT_FOUND, M::VALUE, None)\n }\n pub fn invalid_params(message: impl Into>, data: Option) -> Self {\n Self::new(ErrorCode::INVALID_PARAMS, message, data)\n }\n pub fn internal_error(message: impl Into>, data: Option) -> Self {\n Self::new(ErrorCode::INTERNAL_ERROR, message, data)\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(untagged)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum JsonRpcBatchRequestItem {\n Request(JsonRpcRequest),\n Notification(JsonRpcNotification),\n}\n\nimpl JsonRpcBatchRequestItem {\n pub fn into_non_batch_message(self) -> JsonRpcMessage {\n match self {\n JsonRpcBatchRequestItem::Request(r) => JsonRpcMessage::Request(r),\n JsonRpcBatchRequestItem::Notification(n) => JsonRpcMessage::Notification(n),\n }\n }\n}\n\n#[derive(Debug, Serialize, 
Deserialize, Clone, PartialEq)]\n#[serde(untagged)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum JsonRpcBatchResponseItem {\n Response(JsonRpcResponse),\n Error(JsonRpcError),\n}\n\nimpl JsonRpcBatchResponseItem {\n pub fn into_non_batch_message(self) -> JsonRpcMessage {\n match self {\n JsonRpcBatchResponseItem::Response(r) => JsonRpcMessage::Response(r),\n JsonRpcBatchResponseItem::Error(e) => JsonRpcMessage::Error(e),\n }\n }\n}\n\n/// Represents any JSON-RPC message that can be sent or received.\n///\n/// This enum covers all possible message types in the JSON-RPC protocol:\n/// individual requests/responses, notifications, batch operations, and errors.\n/// It serves as the top-level message container for MCP communication.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(untagged)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum JsonRpcMessage {\n /// A single request expecting a response\n Request(JsonRpcRequest),\n /// A response to a previous request\n Response(JsonRpcResponse),\n /// A one-way notification (no response expected)\n Notification(JsonRpcNotification),\n /// Multiple requests sent together\n BatchRequest(Vec>),\n /// Multiple responses sent together\n BatchResponse(Vec>),\n /// An error response\n Error(JsonRpcError),\n}\n\nimpl JsonRpcMessage {\n #[inline]\n pub const fn request(request: Req, id: RequestId) -> Self {\n JsonRpcMessage::Request(JsonRpcRequest {\n jsonrpc: JsonRpcVersion2_0,\n id,\n request,\n })\n }\n #[inline]\n pub const fn response(response: Resp, id: RequestId) -> Self {\n JsonRpcMessage::Response(JsonRpcResponse {\n jsonrpc: JsonRpcVersion2_0,\n id,\n result: response,\n })\n }\n #[inline]\n pub const fn error(error: ErrorData, id: RequestId) -> Self {\n JsonRpcMessage::Error(JsonRpcError {\n jsonrpc: JsonRpcVersion2_0,\n id,\n error,\n })\n }\n #[inline]\n pub const fn notification(notification: Not) -> Self {\n 
JsonRpcMessage::Notification(JsonRpcNotification {\n jsonrpc: JsonRpcVersion2_0,\n notification,\n })\n }\n pub fn into_request(self) -> Option<(Req, RequestId)> {\n match self {\n JsonRpcMessage::Request(r) => Some((r.request, r.id)),\n _ => None,\n }\n }\n pub fn into_response(self) -> Option<(Resp, RequestId)> {\n match self {\n JsonRpcMessage::Response(r) => Some((r.result, r.id)),\n _ => None,\n }\n }\n pub fn into_notification(self) -> Option {\n match self {\n JsonRpcMessage::Notification(n) => Some(n.notification),\n _ => None,\n }\n }\n pub fn into_error(self) -> Option<(ErrorData, RequestId)> {\n match self {\n JsonRpcMessage::Error(e) => Some((e.error, e.id)),\n _ => None,\n }\n }\n pub fn into_result(self) -> Option<(Result, RequestId)> {\n match self {\n JsonRpcMessage::Response(r) => Some((Ok(r.result), r.id)),\n JsonRpcMessage::Error(e) => Some((Err(e.error), e.id)),\n\n _ => None,\n }\n }\n}\n\n// =============================================================================\n// INITIALIZATION AND CONNECTION SETUP\n// =============================================================================\n\n/// # Empty result\n/// A response that indicates success but carries no data.\npub type EmptyResult = EmptyObject;\n\nimpl From<()> for EmptyResult {\n fn from(_value: ()) -> Self {\n EmptyResult {}\n }\n}\n\nimpl From for () {\n fn from(_value: EmptyResult) {}\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CancelledNotificationParam {\n pub request_id: RequestId,\n pub reason: Option,\n}\n\nconst_string!(CancelledNotificationMethod = \"notifications/cancelled\");\n\n/// # Cancellation\n/// This notification can be sent by either side to indicate that it is cancelling a previously-issued request.\n///\n/// The request SHOULD still be in-flight, but due to communication latency, it is always possible that this notification 
MAY arrive after the request has already finished.\n///\n/// This notification indicates that the result will be unused, so any associated processing SHOULD cease.\n///\n/// A client MUST NOT attempt to cancel its `initialize` request.\npub type CancelledNotification =\n Notification;\n\nconst_string!(InitializeResultMethod = \"initialize\");\n/// # Initialization\n/// This request is sent from the client to the server when it first connects, asking it to begin initialization.\npub type InitializeRequest = Request;\n\nconst_string!(InitializedNotificationMethod = \"notifications/initialized\");\n/// This notification is sent from the client to the server after initialization has finished.\npub type InitializedNotification = NotificationNoParam;\n\n/// Parameters sent by a client when initializing a connection to an MCP server.\n///\n/// This contains the client's protocol version, capabilities, and implementation\n/// information, allowing the server to understand what the client supports.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct InitializeRequestParam {\n /// The MCP protocol version this client supports\n pub protocol_version: ProtocolVersion,\n /// The capabilities this client supports (sampling, roots, etc.)\n pub capabilities: ClientCapabilities,\n /// Information about the client implementation\n pub client_info: Implementation,\n}\n\n/// The server's response to an initialization request.\n///\n/// Contains the server's protocol version, capabilities, and implementation\n/// information, along with optional instructions for the client.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct InitializeResult {\n /// The MCP protocol version this server supports\n pub protocol_version: ProtocolVersion,\n /// 
The capabilities this server provides (tools, resources, prompts, etc.)\n pub capabilities: ServerCapabilities,\n /// Information about the server implementation\n pub server_info: Implementation,\n /// Optional human-readable instructions about using this server\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub instructions: Option,\n}\n\npub type ServerInfo = InitializeResult;\npub type ClientInfo = InitializeRequestParam;\n\nimpl Default for ServerInfo {\n fn default() -> Self {\n ServerInfo {\n protocol_version: ProtocolVersion::default(),\n capabilities: ServerCapabilities::default(),\n server_info: Implementation::from_build_env(),\n instructions: None,\n }\n }\n}\n\nimpl Default for ClientInfo {\n fn default() -> Self {\n ClientInfo {\n protocol_version: ProtocolVersion::default(),\n capabilities: ClientCapabilities::default(),\n client_info: Implementation::from_build_env(),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Implementation {\n pub name: String,\n pub version: String,\n}\n\nimpl Default for Implementation {\n fn default() -> Self {\n Self::from_build_env()\n }\n}\n\nimpl Implementation {\n pub fn from_build_env() -> Self {\n Implementation {\n name: env!(\"CARGO_CRATE_NAME\").to_owned(),\n version: env!(\"CARGO_PKG_VERSION\").to_owned(),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PaginatedRequestParam {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub cursor: Option,\n}\n// =============================================================================\n// PROGRESS AND PAGINATION\n// =============================================================================\n\nconst_string!(PingRequestMethod = \"ping\");\npub type PingRequest = 
RequestNoParam;\n\nconst_string!(ProgressNotificationMethod = \"notifications/progress\");\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ProgressNotificationParam {\n pub progress_token: ProgressToken,\n /// The progress thus far. This should increase every time progress is made, even if the total is unknown.\n pub progress: u32,\n /// Total number of items to process (or total progress required), if known\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub total: Option,\n /// An optional message describing the current progress.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub message: Option,\n}\n\npub type ProgressNotification = Notification;\n\npub type Cursor = String;\n\nmacro_rules! paginated_result {\n ($t:ident {\n $i_item: ident: $t_item: ty\n }) => {\n #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n #[serde(rename_all = \"camelCase\")]\n #[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\n pub struct $t {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub next_cursor: Option,\n pub $i_item: $t_item,\n }\n\n impl $t {\n pub fn with_all_items(\n items: $t_item,\n ) -> Self {\n Self {\n next_cursor: None,\n $i_item: items,\n }\n }\n }\n };\n}\n\n// =============================================================================\n// RESOURCE MANAGEMENT\n// =============================================================================\n\nconst_string!(ListResourcesRequestMethod = \"resources/list\");\n/// Request to list all available resources from a server\npub type ListResourcesRequest =\n RequestOptionalParam;\n\npaginated_result!(ListResourcesResult {\n resources: Vec\n});\n\nconst_string!(ListResourceTemplatesRequestMethod = \"resources/templates/list\");\n/// Request to list all available resource templates from a server\npub type ListResourceTemplatesRequest =\n 
RequestOptionalParam;\n\npaginated_result!(ListResourceTemplatesResult {\n resource_templates: Vec\n});\n\nconst_string!(ReadResourceRequestMethod = \"resources/read\");\n/// Parameters for reading a specific resource\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ReadResourceRequestParam {\n /// The URI of the resource to read\n pub uri: String,\n}\n\n/// Result containing the contents of a read resource\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ReadResourceResult {\n /// The actual content of the resource\n pub contents: Vec,\n}\n\n/// Request to read a specific resource\npub type ReadResourceRequest = Request;\n\nconst_string!(ResourceListChangedNotificationMethod = \"notifications/resources/list_changed\");\n/// Notification sent when the list of available resources changes\npub type ResourceListChangedNotification =\n NotificationNoParam;\n\nconst_string!(SubscribeRequestMethod = \"resources/subscribe\");\n/// Parameters for subscribing to resource updates\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct SubscribeRequestParam {\n /// The URI of the resource to subscribe to\n pub uri: String,\n}\n/// Request to subscribe to resource updates\npub type SubscribeRequest = Request;\n\nconst_string!(UnsubscribeRequestMethod = \"resources/unsubscribe\");\n/// Parameters for unsubscribing from resource updates\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct UnsubscribeRequestParam {\n /// The URI of the resource to unsubscribe from\n pub uri: String,\n}\n/// Request to unsubscribe from 
resource updates\npub type UnsubscribeRequest = Request;\n\nconst_string!(ResourceUpdatedNotificationMethod = \"notifications/resources/updated\");\n/// Parameters for a resource update notification\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ResourceUpdatedNotificationParam {\n /// The URI of the resource that was updated\n pub uri: String,\n}\n/// Notification sent when a subscribed resource is updated\npub type ResourceUpdatedNotification =\n Notification;\n\n// =============================================================================\n// PROMPT MANAGEMENT\n// =============================================================================\n\nconst_string!(ListPromptsRequestMethod = \"prompts/list\");\n/// Request to list all available prompts from a server\npub type ListPromptsRequest = RequestOptionalParam;\n\npaginated_result!(ListPromptsResult {\n prompts: Vec\n});\n\nconst_string!(GetPromptRequestMethod = \"prompts/get\");\n/// Parameters for retrieving a specific prompt\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct GetPromptRequestParam {\n pub name: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub arguments: Option,\n}\n/// Request to get a specific prompt\npub type GetPromptRequest = Request;\n\nconst_string!(PromptListChangedNotificationMethod = \"notifications/prompts/list_changed\");\n/// Notification sent when the list of available prompts changes\npub type PromptListChangedNotification = NotificationNoParam;\n\nconst_string!(ToolListChangedNotificationMethod = \"notifications/tools/list_changed\");\n/// Notification sent when the list of available tools changes\npub type ToolListChangedNotification = NotificationNoParam;\n\n// 
=============================================================================\n// LOGGING\n// =============================================================================\n\n/// Logging levels supported by the MCP protocol\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Copy)]\n#[serde(rename_all = \"lowercase\")] //match spec\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum LoggingLevel {\n Debug,\n Info,\n Notice,\n Warning,\n Error,\n Critical,\n Alert,\n Emergency,\n}\n\nconst_string!(SetLevelRequestMethod = \"logging/setLevel\");\n/// Parameters for setting the logging level\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct SetLevelRequestParam {\n /// The desired logging level\n pub level: LoggingLevel,\n}\n/// Request to set the logging level\npub type SetLevelRequest = Request;\n\nconst_string!(LoggingMessageNotificationMethod = \"notifications/message\");\n/// Parameters for a logging message notification\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct LoggingMessageNotificationParam {\n /// The severity level of this log message\n pub level: LoggingLevel,\n /// Optional logger name that generated this message\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub logger: Option,\n /// The actual log data\n pub data: Value,\n}\n/// Notification containing a log message\npub type LoggingMessageNotification =\n Notification;\n\n// =============================================================================\n// SAMPLING (LLM INTERACTION)\n// =============================================================================\n\nconst_string!(CreateMessageRequestMethod = \"sampling/createMessage\");\npub type CreateMessageRequest = Request;\n\n/// Represents the role of 
a participant in a conversation or message exchange.\n///\n/// Used in sampling and chat contexts to distinguish between different\n/// types of message senders in the conversation flow.\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum Role {\n /// A human user or client making a request\n User,\n /// An AI assistant or server providing a response\n Assistant,\n}\n\n/// A message in a sampling conversation, containing a role and content.\n///\n/// This represents a single message in a conversation flow, used primarily\n/// in LLM sampling requests where the conversation history is important\n/// for generating appropriate responses.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct SamplingMessage {\n /// The role of the message sender (User or Assistant)\n pub role: Role,\n /// The actual content of the message (text, image, etc.)\n pub content: Content,\n}\n\n/// Specifies how much context should be included in sampling requests.\n///\n/// This allows clients to control what additional context information\n/// should be provided to the LLM when processing sampling requests.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum ContextInclusion {\n /// Include context from all connected MCP servers\n #[serde(rename = \"allServers\")]\n AllServers,\n /// Include no additional context\n #[serde(rename = \"none\")]\n None,\n /// Include context only from the requesting server\n #[serde(rename = \"thisServer\")]\n ThisServer,\n}\n\n/// Parameters for creating a message through LLM sampling.\n///\n/// This structure contains all the necessary information for a client to\n/// generate an LLM response, including conversation history, model preferences,\n/// and generation 
parameters.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CreateMessageRequestParam {\n /// The conversation history and current messages\n pub messages: Vec,\n /// Preferences for model selection and behavior\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub model_preferences: Option,\n /// System prompt to guide the model's behavior\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub system_prompt: Option,\n /// How much context to include from MCP servers\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub include_context: Option,\n /// Temperature for controlling randomness (0.0 to 1.0)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub temperature: Option,\n /// Maximum number of tokens to generate\n pub max_tokens: u32,\n /// Sequences that should stop generation\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub stop_sequences: Option>,\n /// Additional metadata for the request\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub metadata: Option,\n}\n\n/// Preferences for model selection and behavior in sampling requests.\n///\n/// This allows servers to express their preferences for which model to use\n/// and how to balance different priorities when the client has multiple\n/// model options available.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ModelPreferences {\n /// Specific model names or families to prefer (e.g., \"claude\", \"gpt\")\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub hints: Option>,\n /// Priority for cost optimization (0.0 to 1.0, higher = prefer cheaper models)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub cost_priority: Option,\n /// Priority for speed/latency (0.0 to 1.0, higher = prefer faster 
models)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub speed_priority: Option,\n /// Priority for intelligence/capability (0.0 to 1.0, higher = prefer more capable models)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub intelligence_priority: Option,\n}\n\n/// A hint suggesting a preferred model name or family.\n///\n/// Model hints are advisory suggestions that help clients choose appropriate\n/// models. They can be specific model names or general families like \"claude\" or \"gpt\".\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ModelHint {\n /// The suggested model name or family identifier\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub name: Option,\n}\n\n// =============================================================================\n// COMPLETION AND AUTOCOMPLETE\n// =============================================================================\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CompleteRequestParam {\n pub r#ref: Reference,\n pub argument: ArgumentInfo,\n}\n\npub type CompleteRequest = Request;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CompletionInfo {\n pub values: Vec,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub total: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub has_more: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CompleteResult {\n pub completion: CompletionInfo,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(tag = 
\"type\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum Reference {\n #[serde(rename = \"ref/resource\")]\n Resource(ResourceReference),\n #[serde(rename = \"ref/prompt\")]\n Prompt(PromptReference),\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ResourceReference {\n pub uri: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptReference {\n pub name: String,\n}\n\nconst_string!(CompleteRequestMethod = \"completion/complete\");\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ArgumentInfo {\n pub name: String,\n pub value: String,\n}\n\n// =============================================================================\n// ROOTS AND WORKSPACE MANAGEMENT\n// =============================================================================\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Root {\n pub uri: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub name: Option,\n}\n\nconst_string!(ListRootsRequestMethod = \"roots/list\");\npub type ListRootsRequest = RequestNoParam;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ListRootsResult {\n pub roots: Vec,\n}\n\nconst_string!(RootsListChangedNotificationMethod = \"notifications/roots/list_changed\");\npub type RootsListChangedNotification = NotificationNoParam;\n\n// =============================================================================\n// TOOL EXECUTION RESULTS\n// 
=============================================================================\n\n/// The result of a tool call operation.\n///\n/// Contains the content returned by the tool execution and an optional\n/// flag indicating whether the operation resulted in an error.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CallToolResult {\n /// The content returned by the tool (text, images, etc.)\n pub content: Vec,\n /// Whether this result represents an error condition\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub is_error: Option,\n}\n\nimpl CallToolResult {\n /// Create a successful tool result\n pub fn success(content: Vec) -> Self {\n CallToolResult {\n content,\n is_error: Some(false),\n }\n }\n /// Create an error tool result\n pub fn error(content: Vec) -> Self {\n CallToolResult {\n content,\n is_error: Some(true),\n }\n }\n}\n\nconst_string!(ListToolsRequestMethod = \"tools/list\");\n/// Request to list all available tools from a server\npub type ListToolsRequest = RequestOptionalParam;\n\npaginated_result!(\n ListToolsResult {\n tools: Vec\n }\n);\n\nconst_string!(CallToolRequestMethod = \"tools/call\");\n/// Parameters for calling a tool provided by an MCP server.\n///\n/// Contains the tool name and optional arguments needed to execute\n/// the tool operation.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CallToolRequestParam {\n /// The name of the tool to call\n pub name: Cow<'static, str>,\n /// Arguments to pass to the tool (must match the tool's input schema)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub arguments: Option,\n}\n\n/// Request to call a specific tool\npub type CallToolRequest = Request;\n\n/// The result of a sampling/createMessage request containing the 
generated response.\n///\n/// This structure contains the generated message along with metadata about\n/// how the generation was performed and why it stopped.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CreateMessageResult {\n /// The identifier of the model that generated the response\n pub model: String,\n /// The reason why generation stopped (e.g., \"endTurn\", \"maxTokens\")\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub stop_reason: Option,\n /// The generated message with role and content\n #[serde(flatten)]\n pub message: SamplingMessage,\n}\n\nimpl CreateMessageResult {\n pub const STOP_REASON_END_TURN: &str = \"endTurn\";\n pub const STOP_REASON_END_SEQUENCE: &str = \"stopSequence\";\n pub const STOP_REASON_END_MAX_TOKEN: &str = \"maxTokens\";\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct GetPromptResult {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n pub messages: Vec,\n}\n\n// =============================================================================\n// MESSAGE TYPE UNIONS\n// =============================================================================\n\nmacro_rules! 
ts_union {\n (\n export type $U: ident =\n $(|)?$($V: ident)|*;\n ) => {\n #[derive(Debug, Serialize, Deserialize, Clone)]\n #[serde(untagged)]\n #[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\n pub enum $U {\n $($V($V),)*\n }\n };\n}\n\nts_union!(\n export type ClientRequest =\n | PingRequest\n | InitializeRequest\n | CompleteRequest\n | SetLevelRequest\n | GetPromptRequest\n | ListPromptsRequest\n | ListResourcesRequest\n | ListResourceTemplatesRequest\n | ReadResourceRequest\n | SubscribeRequest\n | UnsubscribeRequest\n | CallToolRequest\n | ListToolsRequest;\n);\n\nts_union!(\n export type ClientNotification =\n | CancelledNotification\n | ProgressNotification\n | InitializedNotification\n | RootsListChangedNotification;\n);\n\nts_union!(\n export type ClientResult = CreateMessageResult | ListRootsResult | EmptyResult;\n);\n\nimpl ClientResult {\n pub fn empty(_: ()) -> ClientResult {\n ClientResult::EmptyResult(EmptyResult {})\n }\n}\n\npub type ClientJsonRpcMessage = JsonRpcMessage;\n\nts_union!(\n export type ServerRequest =\n | PingRequest\n | CreateMessageRequest\n | ListRootsRequest;\n);\n\nts_union!(\n export type ServerNotification =\n | CancelledNotification\n | ProgressNotification\n | LoggingMessageNotification\n | ResourceUpdatedNotification\n | ResourceListChangedNotification\n | ToolListChangedNotification\n | PromptListChangedNotification;\n);\n\nts_union!(\n export type ServerResult =\n | InitializeResult\n | CompleteResult\n | GetPromptResult\n | ListPromptsResult\n | ListResourcesResult\n | ListResourceTemplatesResult\n | ReadResourceResult\n | CallToolResult\n | ListToolsResult\n | EmptyResult\n ;\n);\n\nimpl ServerResult {\n pub fn empty(_: ()) -> ServerResult {\n ServerResult::EmptyResult(EmptyResult {})\n }\n}\n\npub type ServerJsonRpcMessage = JsonRpcMessage;\n\nimpl TryInto for ServerNotification {\n type Error = ServerNotification;\n fn try_into(self) -> Result {\n if let ServerNotification::CancelledNotification(t) = 
self {\n Ok(t)\n } else {\n Err(self)\n }\n }\n}\n\nimpl TryInto for ClientNotification {\n type Error = ClientNotification;\n fn try_into(self) -> Result {\n if let ClientNotification::CancelledNotification(t) = self {\n Ok(t)\n } else {\n Err(self)\n }\n }\n}\nimpl From for ServerNotification {\n fn from(value: CancelledNotification) -> Self {\n ServerNotification::CancelledNotification(value)\n }\n}\n\nimpl From for ClientNotification {\n fn from(value: CancelledNotification) -> Self {\n ClientNotification::CancelledNotification(value)\n }\n}\n\n// =============================================================================\n// TESTS\n// =============================================================================\n\n#[cfg(test)]\nmod tests {\n use serde_json::json;\n\n use super::*;\n\n #[test]\n fn test_notification_serde() {\n let raw = json!( {\n \"jsonrpc\": JsonRpcVersion2_0,\n \"method\": InitializedNotificationMethod,\n });\n let message: ClientJsonRpcMessage =\n serde_json::from_value(raw.clone()).expect(\"invalid notification\");\n match &message {\n ClientJsonRpcMessage::Notification(JsonRpcNotification {\n notification: ClientNotification::InitializedNotification(_n),\n ..\n }) => {}\n _ => panic!(\"Expected Notification\"),\n }\n let json = serde_json::to_value(message).expect(\"valid json\");\n assert_eq!(json, raw);\n }\n\n #[test]\n fn test_request_conversion() {\n let raw = json!( {\n \"jsonrpc\": JsonRpcVersion2_0,\n \"id\": 1,\n \"method\": \"request\",\n \"params\": {\"key\": \"value\"},\n });\n let message: JsonRpcMessage = serde_json::from_value(raw.clone()).expect(\"invalid request\");\n\n match &message {\n JsonRpcMessage::Request(r) => {\n assert_eq!(r.id, RequestId::Number(1));\n assert_eq!(r.request.method, \"request\");\n assert_eq!(\n &r.request.params,\n json!({\"key\": \"value\"})\n .as_object()\n .expect(\"should be an object\")\n );\n }\n _ => panic!(\"Expected Request\"),\n }\n let json = 
serde_json::to_value(&message).expect(\"valid json\");\n assert_eq!(json, raw);\n }\n\n #[test]\n fn test_initial_request_response_serde() {\n let request = json!({\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"initialize\",\n \"params\": {\n \"protocolVersion\": \"2024-11-05\",\n \"capabilities\": {\n \"roots\": {\n \"listChanged\": true\n },\n \"sampling\": {}\n },\n \"clientInfo\": {\n \"name\": \"ExampleClient\",\n \"version\": \"1.0.0\"\n }\n }\n });\n let raw_response_json = json!({\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"result\": {\n \"protocolVersion\": \"2024-11-05\",\n \"capabilities\": {\n \"logging\": {},\n \"prompts\": {\n \"listChanged\": true\n },\n \"resources\": {\n \"subscribe\": true,\n \"listChanged\": true\n },\n \"tools\": {\n \"listChanged\": true\n }\n },\n \"serverInfo\": {\n \"name\": \"ExampleServer\",\n \"version\": \"1.0.0\"\n }\n }\n });\n let request: ClientJsonRpcMessage =\n serde_json::from_value(request.clone()).expect(\"invalid request\");\n let (request, id) = request.into_request().expect(\"should be a request\");\n assert_eq!(id, RequestId::Number(1));\n match request {\n ClientRequest::InitializeRequest(Request {\n method: _,\n params:\n InitializeRequestParam {\n protocol_version: _,\n capabilities,\n client_info,\n },\n ..\n }) => {\n assert_eq!(capabilities.roots.unwrap().list_changed, Some(true));\n assert_eq!(capabilities.sampling.unwrap().len(), 0);\n assert_eq!(client_info.name, \"ExampleClient\");\n assert_eq!(client_info.version, \"1.0.0\");\n }\n _ => panic!(\"Expected InitializeRequest\"),\n }\n let server_response: ServerJsonRpcMessage =\n serde_json::from_value(raw_response_json.clone()).expect(\"invalid response\");\n let (response, id) = server_response\n .clone()\n .into_response()\n .expect(\"expect response\");\n assert_eq!(id, RequestId::Number(1));\n match response {\n ServerResult::InitializeResult(InitializeResult {\n protocol_version: _,\n capabilities,\n server_info,\n instructions,\n }) => {\n 
assert_eq!(capabilities.logging.unwrap().len(), 0);\n assert_eq!(capabilities.prompts.unwrap().list_changed, Some(true));\n assert_eq!(\n capabilities.resources.as_ref().unwrap().subscribe,\n Some(true)\n );\n assert_eq!(capabilities.resources.unwrap().list_changed, Some(true));\n assert_eq!(capabilities.tools.unwrap().list_changed, Some(true));\n assert_eq!(server_info.name, \"ExampleServer\");\n assert_eq!(server_info.version, \"1.0.0\");\n assert_eq!(instructions, None);\n }\n other => panic!(\"Expected InitializeResult, got {other:?}\"),\n }\n\n let server_response_json: Value = serde_json::to_value(&server_response).expect(\"msg\");\n\n assert_eq!(server_response_json, raw_response_json);\n }\n\n #[test]\n fn test_protocol_version_order() {\n let v1 = ProtocolVersion::V_2024_11_05;\n let v2 = ProtocolVersion::V_2025_03_26;\n assert!(v1 < v2);\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/service/client.rs", "use std::borrow::Cow;\n\nuse thiserror::Error;\n\nuse super::*;\nuse crate::{\n model::{\n CallToolRequest, CallToolRequestParam, CallToolResult, CancelledNotification,\n CancelledNotificationParam, ClientInfo, ClientJsonRpcMessage, ClientNotification,\n ClientRequest, ClientResult, CompleteRequest, CompleteRequestParam, CompleteResult,\n GetPromptRequest, GetPromptRequestParam, GetPromptResult, InitializeRequest,\n InitializedNotification, JsonRpcResponse, ListPromptsRequest, ListPromptsResult,\n ListResourceTemplatesRequest, ListResourceTemplatesResult, ListResourcesRequest,\n ListResourcesResult, ListToolsRequest, ListToolsResult, PaginatedRequestParam,\n ProgressNotification, ProgressNotificationParam, ReadResourceRequest,\n ReadResourceRequestParam, ReadResourceResult, RequestId, RootsListChangedNotification,\n ServerInfo, ServerJsonRpcMessage, ServerNotification, ServerRequest, ServerResult,\n SetLevelRequest, SetLevelRequestParam, SubscribeRequest, SubscribeRequestParam,\n UnsubscribeRequest, UnsubscribeRequestParam,\n },\n 
transport::DynamicTransportError,\n};\n\n/// It represents the error that may occur when serving the client.\n///\n/// if you want to handle the error, you can use `serve_client_with_ct` or `serve_client` with `Result, ClientError>`\n#[derive(Error, Debug)]\npub enum ClientInitializeError {\n #[error(\"expect initialized response, but received: {0:?}\")]\n ExpectedInitResponse(Option),\n\n #[error(\"expect initialized result, but received: {0:?}\")]\n ExpectedInitResult(Option),\n\n #[error(\"conflict initialized response id: expected {0}, got {1}\")]\n ConflictInitResponseId(RequestId, RequestId),\n\n #[error(\"connection closed: {0}\")]\n ConnectionClosed(String),\n\n #[error(\"Send message error {error}, when {context}\")]\n TransportError {\n error: DynamicTransportError,\n context: Cow<'static, str>,\n },\n\n #[error(\"Cancelled\")]\n Cancelled,\n}\n\nimpl ClientInitializeError {\n pub fn transport + 'static>(\n error: T::Error,\n context: impl Into>,\n ) -> Self {\n Self::TransportError {\n error: DynamicTransportError::new::(error),\n context: context.into(),\n }\n }\n}\n\n/// Helper function to get the next message from the stream\nasync fn expect_next_message(\n transport: &mut T,\n context: &str,\n) -> Result\nwhere\n T: Transport,\n{\n transport\n .receive()\n .await\n .ok_or_else(|| ClientInitializeError::ConnectionClosed(context.to_string()))\n}\n\n/// Helper function to expect a response from the stream\nasync fn expect_response(\n transport: &mut T,\n context: &str,\n) -> Result<(ServerResult, RequestId), ClientInitializeError>\nwhere\n T: Transport,\n{\n let msg = expect_next_message(transport, context).await?;\n\n match msg {\n ServerJsonRpcMessage::Response(JsonRpcResponse { id, result, .. 
}) => Ok((result, id)),\n _ => Err(ClientInitializeError::ExpectedInitResponse(Some(msg))),\n }\n}\n\n#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]\npub struct RoleClient;\n\nimpl ServiceRole for RoleClient {\n type Req = ClientRequest;\n type Resp = ClientResult;\n type Not = ClientNotification;\n type PeerReq = ServerRequest;\n type PeerResp = ServerResult;\n type PeerNot = ServerNotification;\n type Info = ClientInfo;\n type PeerInfo = ServerInfo;\n type InitializeError = ClientInitializeError;\n const IS_CLIENT: bool = true;\n}\n\npub type ServerSink = Peer;\n\nimpl> ServiceExt for S {\n fn serve_with_ct(\n self,\n transport: T,\n ct: CancellationToken,\n ) -> impl Future, ClientInitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized,\n {\n serve_client_with_ct(self, transport, ct)\n }\n}\n\npub async fn serve_client(\n service: S,\n transport: T,\n) -> Result, ClientInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n serve_client_with_ct(service, transport, Default::default()).await\n}\n\npub async fn serve_client_with_ct(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ClientInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n tokio::select! 
{\n result = serve_client_with_ct_inner(service, transport.into_transport(), ct.clone()) => { result }\n _ = ct.cancelled() => {\n Err(ClientInitializeError::Cancelled)\n }\n }\n}\n\nasync fn serve_client_with_ct_inner(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ClientInitializeError>\nwhere\n S: Service,\n T: Transport + 'static,\n{\n let mut transport = transport.into_transport();\n let id_provider = >::default();\n\n // service\n let id = id_provider.next_request_id();\n let init_request = InitializeRequest {\n method: Default::default(),\n params: service.get_info(),\n extensions: Default::default(),\n };\n transport\n .send(ClientJsonRpcMessage::request(\n ClientRequest::InitializeRequest(init_request),\n id.clone(),\n ))\n .await\n .map_err(|error| ClientInitializeError::TransportError {\n error: DynamicTransportError::new::(error),\n context: \"send initialize request\".into(),\n })?;\n\n let (response, response_id) = expect_response(&mut transport, \"initialize response\").await?;\n\n if id != response_id {\n return Err(ClientInitializeError::ConflictInitResponseId(\n id,\n response_id,\n ));\n }\n\n let ServerResult::InitializeResult(initialize_result) = response else {\n return Err(ClientInitializeError::ExpectedInitResult(Some(response)));\n };\n\n // send notification\n let notification = ClientJsonRpcMessage::notification(\n ClientNotification::InitializedNotification(InitializedNotification {\n method: Default::default(),\n extensions: Default::default(),\n }),\n );\n transport.send(notification).await.map_err(|error| {\n ClientInitializeError::transport::(error, \"send initialized notification\")\n })?;\n let (peer, peer_rx) = Peer::new(id_provider, Some(initialize_result));\n Ok(serve_inner(service, transport, peer, peer_rx, ct))\n}\n\nmacro_rules! 
method {\n (peer_req $method:ident $Req:ident() => $Resp: ident ) => {\n pub async fn $method(&self) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident) => $Resp: ident ) => {\n pub async fn $method(&self, params: $Param) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident)? => $Resp: ident ) => {\n pub async fn $method(&self, params: Option<$Param>) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident)) => {\n pub async fn $method(&self, params: $Param) -> Result<(), ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::EmptyResult(_) => Ok(()),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n\n (peer_not $method:ident $Not:ident($Param: ident)) => {\n pub async fn $method(&self, params: $Param) -> Result<(), ServiceError> {\n self.send_notification(ClientNotification::$Not($Not {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n (peer_not $method:ident $Not:ident) => {\n pub async fn 
$method(&self) -> Result<(), ServiceError> {\n self.send_notification(ClientNotification::$Not($Not {\n method: Default::default(),\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n}\n\nimpl Peer {\n method!(peer_req complete CompleteRequest(CompleteRequestParam) => CompleteResult);\n method!(peer_req set_level SetLevelRequest(SetLevelRequestParam));\n method!(peer_req get_prompt GetPromptRequest(GetPromptRequestParam) => GetPromptResult);\n method!(peer_req list_prompts ListPromptsRequest(PaginatedRequestParam)? => ListPromptsResult);\n method!(peer_req list_resources ListResourcesRequest(PaginatedRequestParam)? => ListResourcesResult);\n method!(peer_req list_resource_templates ListResourceTemplatesRequest(PaginatedRequestParam)? => ListResourceTemplatesResult);\n method!(peer_req read_resource ReadResourceRequest(ReadResourceRequestParam) => ReadResourceResult);\n method!(peer_req subscribe SubscribeRequest(SubscribeRequestParam) );\n method!(peer_req unsubscribe UnsubscribeRequest(UnsubscribeRequestParam));\n method!(peer_req call_tool CallToolRequest(CallToolRequestParam) => CallToolResult);\n method!(peer_req list_tools ListToolsRequest(PaginatedRequestParam)? 
=> ListToolsResult);\n\n method!(peer_not notify_cancelled CancelledNotification(CancelledNotificationParam));\n method!(peer_not notify_progress ProgressNotification(ProgressNotificationParam));\n method!(peer_not notify_initialized InitializedNotification);\n method!(peer_not notify_roots_list_changed RootsListChangedNotification);\n}\n\nimpl Peer {\n /// A wrapper method for [`Peer::list_tools`].\n ///\n /// This function will call [`Peer::list_tools`] multiple times until all tools are listed.\n pub async fn list_all_tools(&self) -> Result, ServiceError> {\n let mut tools = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_tools(Some(PaginatedRequestParam { cursor }))\n .await?;\n tools.extend(result.tools);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(tools)\n }\n\n /// A wrapper method for [`Peer::list_prompts`].\n ///\n /// This function will call [`Peer::list_prompts`] multiple times until all prompts are listed.\n pub async fn list_all_prompts(&self) -> Result, ServiceError> {\n let mut prompts = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_prompts(Some(PaginatedRequestParam { cursor }))\n .await?;\n prompts.extend(result.prompts);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(prompts)\n }\n\n /// A wrapper method for [`Peer::list_resources`].\n ///\n /// This function will call [`Peer::list_resources`] multiple times until all resources are listed.\n pub async fn list_all_resources(&self) -> Result, ServiceError> {\n let mut resources = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_resources(Some(PaginatedRequestParam { cursor }))\n .await?;\n resources.extend(result.resources);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(resources)\n }\n\n /// A wrapper method for [`Peer::list_resource_templates`].\n ///\n /// This function will call [`Peer::list_resource_templates`] multiple 
times until all resource templates are listed.\n pub async fn list_all_resource_templates(\n &self,\n ) -> Result, ServiceError> {\n let mut resource_templates = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_resource_templates(Some(PaginatedRequestParam { cursor }))\n .await?;\n resource_templates.extend(result.resource_templates);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(resource_templates)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/service/server.rs", "use std::borrow::Cow;\n\nuse thiserror::Error;\n\nuse super::*;\nuse crate::{\n model::{\n CancelledNotification, CancelledNotificationParam, ClientInfo, ClientJsonRpcMessage,\n ClientNotification, ClientRequest, ClientResult, CreateMessageRequest,\n CreateMessageRequestParam, CreateMessageResult, ErrorData, ListRootsRequest,\n ListRootsResult, LoggingMessageNotification, LoggingMessageNotificationParam,\n ProgressNotification, ProgressNotificationParam, PromptListChangedNotification,\n ProtocolVersion, ResourceListChangedNotification, ResourceUpdatedNotification,\n ResourceUpdatedNotificationParam, ServerInfo, ServerNotification, ServerRequest,\n ServerResult, ToolListChangedNotification,\n },\n transport::DynamicTransportError,\n};\n\n#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]\npub struct RoleServer;\n\nimpl ServiceRole for RoleServer {\n type Req = ServerRequest;\n type Resp = ServerResult;\n type Not = ServerNotification;\n type PeerReq = ClientRequest;\n type PeerResp = ClientResult;\n type PeerNot = ClientNotification;\n type Info = ServerInfo;\n type PeerInfo = ClientInfo;\n\n type InitializeError = ServerInitializeError;\n const IS_CLIENT: bool = false;\n}\n\n/// It represents the error that may occur when serving the server.\n///\n/// if you want to handle the error, you can use `serve_server_with_ct` or `serve_server` with `Result, ServerError>`\n#[derive(Error, Debug)]\npub enum ServerInitializeError {\n #[error(\"expect initialized 
request, but received: {0:?}\")]\n ExpectedInitializeRequest(Option),\n\n #[error(\"expect initialized notification, but received: {0:?}\")]\n ExpectedInitializedNotification(Option),\n\n #[error(\"connection closed: {0}\")]\n ConnectionClosed(String),\n\n #[error(\"unexpected initialize result: {0:?}\")]\n UnexpectedInitializeResponse(ServerResult),\n\n #[error(\"initialize failed: {0}\")]\n InitializeFailed(ErrorData),\n\n #[error(\"unsupported protocol version: {0}\")]\n UnsupportedProtocolVersion(ProtocolVersion),\n\n #[error(\"Send message error {error}, when {context}\")]\n TransportError {\n error: DynamicTransportError,\n context: Cow<'static, str>,\n },\n\n #[error(\"Cancelled\")]\n Cancelled,\n}\n\nimpl ServerInitializeError {\n pub fn transport + 'static>(\n error: T::Error,\n context: impl Into>,\n ) -> Self {\n Self::TransportError {\n error: DynamicTransportError::new::(error),\n context: context.into(),\n }\n }\n}\npub type ClientSink = Peer;\n\nimpl> ServiceExt for S {\n fn serve_with_ct(\n self,\n transport: T,\n ct: CancellationToken,\n ) -> impl Future, ServerInitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized,\n {\n serve_server_with_ct(self, transport, ct)\n }\n}\n\npub async fn serve_server(\n service: S,\n transport: T,\n) -> Result, ServerInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n serve_server_with_ct(service, transport, CancellationToken::new()).await\n}\n\n/// Helper function to get the next message from the stream\nasync fn expect_next_message(\n transport: &mut T,\n context: &str,\n) -> Result\nwhere\n T: Transport,\n{\n transport\n .receive()\n .await\n .ok_or_else(|| ServerInitializeError::ConnectionClosed(context.to_string()))\n}\n\n/// Helper function to expect a request from the stream\nasync fn expect_request(\n transport: &mut T,\n context: &str,\n) -> Result<(ClientRequest, RequestId), 
ServerInitializeError>\nwhere\n T: Transport,\n{\n let msg = expect_next_message(transport, context).await?;\n let msg_clone = msg.clone();\n msg.into_request()\n .ok_or(ServerInitializeError::ExpectedInitializeRequest(Some(\n msg_clone,\n )))\n}\n\n/// Helper function to expect a notification from the stream\nasync fn expect_notification(\n transport: &mut T,\n context: &str,\n) -> Result\nwhere\n T: Transport,\n{\n let msg = expect_next_message(transport, context).await?;\n let msg_clone = msg.clone();\n msg.into_notification()\n .ok_or(ServerInitializeError::ExpectedInitializedNotification(\n Some(msg_clone),\n ))\n}\n\npub async fn serve_server_with_ct(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ServerInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n tokio::select! {\n result = serve_server_with_ct_inner(service, transport.into_transport(), ct.clone()) => { result }\n _ = ct.cancelled() => {\n Err(ServerInitializeError::Cancelled)\n }\n }\n}\n\nasync fn serve_server_with_ct_inner(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ServerInitializeError>\nwhere\n S: Service,\n T: Transport + 'static,\n{\n let mut transport = transport.into_transport();\n let id_provider = >::default();\n\n // Get initialize request\n let (request, id) = expect_request(&mut transport, \"initialized request\").await?;\n\n let ClientRequest::InitializeRequest(peer_info) = &request else {\n return Err(ServerInitializeError::ExpectedInitializeRequest(Some(\n ClientJsonRpcMessage::request(request, id),\n )));\n };\n let (peer, peer_rx) = Peer::new(id_provider, Some(peer_info.params.clone()));\n let context = RequestContext {\n ct: ct.child_token(),\n id: id.clone(),\n meta: request.get_meta().clone(),\n extensions: request.extensions().clone(),\n peer: peer.clone(),\n };\n // Send initialize response\n let init_response = service.handle_request(request.clone(), context).await;\n 
let mut init_response = match init_response {\n Ok(ServerResult::InitializeResult(init_response)) => init_response,\n Ok(result) => {\n return Err(ServerInitializeError::UnexpectedInitializeResponse(result));\n }\n Err(e) => {\n transport\n .send(ServerJsonRpcMessage::error(e.clone(), id))\n .await\n .map_err(|error| {\n ServerInitializeError::transport::(error, \"sending error response\")\n })?;\n return Err(ServerInitializeError::InitializeFailed(e));\n }\n };\n let peer_protocol_version = peer_info.params.protocol_version.clone();\n let protocol_version = match peer_protocol_version\n .partial_cmp(&init_response.protocol_version)\n .ok_or(ServerInitializeError::UnsupportedProtocolVersion(\n peer_protocol_version,\n ))? {\n std::cmp::Ordering::Less => peer_info.params.protocol_version.clone(),\n _ => init_response.protocol_version,\n };\n init_response.protocol_version = protocol_version;\n transport\n .send(ServerJsonRpcMessage::response(\n ServerResult::InitializeResult(init_response),\n id,\n ))\n .await\n .map_err(|error| {\n ServerInitializeError::transport::(error, \"sending initialize response\")\n })?;\n\n // Wait for initialize notification\n let notification = expect_notification(&mut transport, \"initialize notification\").await?;\n let ClientNotification::InitializedNotification(_) = notification else {\n return Err(ServerInitializeError::ExpectedInitializedNotification(\n Some(ClientJsonRpcMessage::notification(notification)),\n ));\n };\n let context = NotificationContext {\n meta: notification.get_meta().clone(),\n extensions: notification.extensions().clone(),\n peer: peer.clone(),\n };\n let _ = service.handle_notification(notification, context).await;\n // Continue processing service\n Ok(serve_inner(service, transport, peer, peer_rx, ct))\n}\n\nmacro_rules! 
method {\n (peer_req $method:ident $Req:ident() => $Resp: ident ) => {\n pub async fn $method(&self) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ServerRequest::$Req($Req {\n method: Default::default(),\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ClientResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident) => $Resp: ident ) => {\n pub async fn $method(&self, params: $Param) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ServerRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ClientResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident)) => {\n pub fn $method(\n &self,\n params: $Param,\n ) -> impl Future> + Send + '_ {\n async move {\n let result = self\n .send_request(ServerRequest::$Req($Req {\n method: Default::default(),\n params,\n }))\n .await?;\n match result {\n ClientResult::EmptyResult(_) => Ok(()),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n }\n };\n\n (peer_not $method:ident $Not:ident($Param: ident)) => {\n pub async fn $method(&self, params: $Param) -> Result<(), ServiceError> {\n self.send_notification(ServerNotification::$Not($Not {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n (peer_not $method:ident $Not:ident) => {\n pub async fn $method(&self) -> Result<(), ServiceError> {\n self.send_notification(ServerNotification::$Not($Not {\n method: Default::default(),\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n}\n\nimpl Peer {\n method!(peer_req create_message CreateMessageRequest(CreateMessageRequestParam) => CreateMessageResult);\n method!(peer_req list_roots ListRootsRequest() => ListRootsResult);\n\n method!(peer_not 
notify_cancelled CancelledNotification(CancelledNotificationParam));\n method!(peer_not notify_progress ProgressNotification(ProgressNotificationParam));\n method!(peer_not notify_logging_message LoggingMessageNotification(LoggingMessageNotificationParam));\n method!(peer_not notify_resource_updated ResourceUpdatedNotification(ResourceUpdatedNotificationParam));\n method!(peer_not notify_resource_list_changed ResourceListChangedNotification);\n method!(peer_not notify_tool_list_changed ToolListChangedNotification);\n method!(peer_not notify_prompt_list_changed PromptListChangedNotification);\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/async_rw.rs", "use std::{marker::PhantomData, sync::Arc};\n\n// use crate::schema::*;\nuse futures::{SinkExt, StreamExt};\nuse serde::{Serialize, de::DeserializeOwned};\nuse thiserror::Error;\nuse tokio::{\n io::{AsyncRead, AsyncWrite},\n sync::Mutex,\n};\nuse tokio_util::{\n bytes::{Buf, BufMut, BytesMut},\n codec::{Decoder, Encoder, FramedRead, FramedWrite},\n};\n\nuse super::{IntoTransport, Transport};\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\npub enum TransportAdapterAsyncRW {}\n\nimpl IntoTransport for (R, W)\nwhere\n Role: ServiceRole,\n R: AsyncRead + Send + 'static + Unpin,\n W: AsyncWrite + Send + 'static + Unpin,\n{\n fn into_transport(self) -> impl Transport + 'static {\n AsyncRwTransport::new(self.0, self.1)\n }\n}\n\npub enum TransportAdapterAsyncCombinedRW {}\nimpl IntoTransport for S\nwhere\n Role: ServiceRole,\n S: AsyncRead + AsyncWrite + Send + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static {\n IntoTransport::::into_transport(\n tokio::io::split(self),\n )\n }\n}\n\npub struct AsyncRwTransport {\n read: FramedRead>>,\n write: Arc>>>>,\n}\n\nimpl AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n pub fn new(read: R, write: W) -> Self {\n let read = FramedRead::new(\n read,\n JsonRpcMessageCodec::>::default(),\n 
);\n let write = Arc::new(Mutex::new(FramedWrite::new(\n write,\n JsonRpcMessageCodec::>::default(),\n )));\n Self { read, write }\n }\n}\n\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\nimpl AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n pub fn new_client(read: R, write: W) -> Self {\n Self::new(read, write)\n }\n}\n\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\nimpl AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n pub fn new_server(read: R, write: W) -> Self {\n Self::new(read, write)\n }\n}\n\nimpl Transport for AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n type Error = std::io::Error;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let lock = self.write.clone();\n async move {\n let mut write = lock.lock().await;\n write.send(item).await.map_err(Into::into)\n }\n }\n\n fn receive(&mut self) -> impl Future>> {\n let next = self.read.next();\n async {\n next.await.and_then(|e| {\n e.inspect_err(|e| {\n tracing::error!(\"Error reading from stream: {}\", e);\n })\n .ok()\n })\n }\n }\n\n async fn close(&mut self) -> Result<(), Self::Error> {\n Ok(())\n }\n}\n\n#[derive(Debug, Clone)]\npub struct JsonRpcMessageCodec {\n _marker: PhantomData T>,\n next_index: usize,\n max_length: usize,\n is_discarding: bool,\n}\n\nimpl Default for JsonRpcMessageCodec {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl JsonRpcMessageCodec {\n pub fn new() -> Self {\n Self {\n _marker: PhantomData,\n next_index: 0,\n max_length: usize::MAX,\n is_discarding: false,\n }\n }\n\n pub fn new_with_max_length(max_length: usize) -> Self {\n Self {\n max_length,\n ..Self::new()\n }\n }\n\n pub fn max_length(&self) -> usize {\n self.max_length\n }\n}\n\nfn without_carriage_return(s: &[u8]) -> &[u8] {\n if let 
Some(&b'\\r') = s.last() {\n &s[..s.len() - 1]\n } else {\n s\n }\n}\n\n/// Check if a notification method is a standard MCP notification\n/// should update this when MCP spec is updated about new notifications\nfn is_standard_notification(method: &str) -> bool {\n matches!(\n method,\n \"notifications/cancelled\"\n | \"notifications/initialized\"\n | \"notifications/message\"\n | \"notifications/progress\"\n | \"notifications/prompts/list_changed\"\n | \"notifications/resources/list_changed\"\n | \"notifications/resources/updated\"\n | \"notifications/roots/list_changed\"\n | \"notifications/tools/list_changed\"\n )\n}\n\n/// Try to parse a message with compatibility handling for non-standard notifications\nfn try_parse_with_compatibility(\n line: &[u8],\n context: &str,\n) -> Result, JsonRpcMessageCodecError> {\n if let Ok(line_str) = std::str::from_utf8(line) {\n match serde_json::from_slice(line) {\n Ok(item) => Ok(Some(item)),\n Err(e) => {\n // Check if this is a non-standard notification that should be ignored\n if line_str.contains(\"\\\"method\\\":\\\"notifications/\") {\n // Extract the method name to check if it's standard\n if let Ok(json_value) = serde_json::from_str::(line_str) {\n if let Some(method) = json_value.get(\"method\").and_then(|m| m.as_str()) {\n if method.starts_with(\"notifications/\")\n && !is_standard_notification(method)\n {\n tracing::debug!(\n \"Ignoring non-standard notification {} {}: {}\",\n method,\n context,\n line_str\n );\n return Ok(None); // Skip this message\n }\n }\n }\n }\n\n tracing::debug!(\n \"Failed to parse message {}: {} | Error: {}\",\n context,\n line_str,\n e\n );\n Err(JsonRpcMessageCodecError::Serde(e))\n }\n }\n } else {\n serde_json::from_slice(line)\n .map(Some)\n .map_err(JsonRpcMessageCodecError::Serde)\n }\n}\n\n#[derive(Debug, Error)]\npub enum JsonRpcMessageCodecError {\n #[error(\"max line length exceeded\")]\n MaxLineLengthExceeded,\n #[error(\"serde error {0}\")]\n Serde(#[from] 
serde_json::Error),\n #[error(\"io error {0}\")]\n Io(#[from] std::io::Error),\n}\n\nimpl From for std::io::Error {\n fn from(value: JsonRpcMessageCodecError) -> Self {\n match value {\n JsonRpcMessageCodecError::MaxLineLengthExceeded => {\n std::io::Error::new(std::io::ErrorKind::InvalidData, value)\n }\n JsonRpcMessageCodecError::Serde(e) => e.into(),\n JsonRpcMessageCodecError::Io(e) => e,\n }\n }\n}\n\nimpl Decoder for JsonRpcMessageCodec {\n type Item = T;\n\n type Error = JsonRpcMessageCodecError;\n\n fn decode(\n &mut self,\n buf: &mut BytesMut,\n ) -> Result, JsonRpcMessageCodecError> {\n loop {\n // Determine how far into the buffer we'll search for a newline. If\n // there's no max_length set, we'll read to the end of the buffer.\n let read_to = std::cmp::min(self.max_length.saturating_add(1), buf.len());\n\n let newline_offset = buf[self.next_index..read_to]\n .iter()\n .position(|b| *b == b'\\n');\n\n match (self.is_discarding, newline_offset) {\n (true, Some(offset)) => {\n // If we found a newline, discard up to that offset and\n // then stop discarding. On the next iteration, we'll try\n // to read a line normally.\n buf.advance(offset + self.next_index + 1);\n self.is_discarding = false;\n self.next_index = 0;\n }\n (true, None) => {\n // Otherwise, we didn't find a newline, so we'll discard\n // everything we read. On the next iteration, we'll continue\n // discarding up to max_len bytes unless we find a newline.\n buf.advance(read_to);\n self.next_index = 0;\n if buf.is_empty() {\n return Ok(None);\n }\n }\n (false, Some(offset)) => {\n // Found a line!\n let newline_index = offset + self.next_index;\n self.next_index = 0;\n let line = buf.split_to(newline_index + 1);\n let line = &line[..line.len() - 1];\n let line = without_carriage_return(line);\n\n // Use compatibility handling function\n let item = match try_parse_with_compatibility(line, \"decode\")? 
{\n Some(item) => item,\n None => return Ok(None), // Skip non-standard message\n };\n return Ok(Some(item));\n }\n (false, None) if buf.len() > self.max_length => {\n // Reached the maximum length without finding a\n // newline, return an error and start discarding on the\n // next call.\n self.is_discarding = true;\n return Err(JsonRpcMessageCodecError::MaxLineLengthExceeded);\n }\n (false, None) => {\n // We didn't find a line or reach the length limit, so the next\n // call will resume searching at the current offset.\n self.next_index = read_to;\n return Ok(None);\n }\n }\n }\n }\n\n fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, JsonRpcMessageCodecError> {\n Ok(match self.decode(buf)? {\n Some(frame) => Some(frame),\n None => {\n self.next_index = 0;\n // No terminating newline - return remaining data, if any\n if buf.is_empty() || buf == &b\"\\r\"[..] {\n None\n } else {\n let line = buf.split_to(buf.len());\n let line = without_carriage_return(&line);\n\n // Use compatibility handling function\n let item = match try_parse_with_compatibility(line, \"decode_eof\")? 
{\n Some(item) => item,\n None => return Ok(None), // Skip non-standard message\n };\n Some(item)\n }\n }\n })\n }\n}\n\nimpl Encoder for JsonRpcMessageCodec {\n type Error = JsonRpcMessageCodecError;\n\n fn encode(&mut self, item: T, buf: &mut BytesMut) -> Result<(), JsonRpcMessageCodecError> {\n serde_json::to_writer(buf.writer(), &item)?;\n buf.put_u8(b'\\n');\n Ok(())\n }\n}\n\n#[cfg(test)]\nmod test {\n use futures::{Sink, Stream};\n\n use super::*;\n fn from_async_read(reader: R) -> impl Stream {\n FramedRead::new(reader, JsonRpcMessageCodec::::default()).filter_map(|result| {\n if let Err(e) = &result {\n tracing::error!(\"Error reading from stream: {}\", e);\n }\n futures::future::ready(result.ok())\n })\n }\n\n fn from_async_write(\n writer: W,\n ) -> impl Sink {\n FramedWrite::new(writer, JsonRpcMessageCodec::::default()).sink_map_err(Into::into)\n }\n #[tokio::test]\n async fn test_decode() {\n use futures::StreamExt;\n use tokio::io::BufReader;\n\n let data = r#\"{\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":1}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":2}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":3}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":4}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":5}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":6}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":7}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":8}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":9}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":10}\n\n \"#;\n\n let mut cursor = BufReader::new(data.as_bytes());\n let mut stream = from_async_read::(&mut cursor);\n\n for i in 1..=10 {\n let item = stream.next().await.unwrap();\n assert_eq!(\n item,\n serde_json::json!({\n \"jsonrpc\": \"2.0\",\n 
\"method\": \"subtract\",\n \"params\": if i % 2 != 0 { [42, 23] } else { [23, 42] },\n \"id\": i,\n })\n );\n }\n }\n\n #[tokio::test]\n async fn test_encode() {\n let test_messages = vec![\n serde_json::json!({\n \"jsonrpc\": \"2.0\",\n \"method\": \"subtract\",\n \"params\": [42, 23],\n \"id\": 1,\n }),\n serde_json::json!({\n \"jsonrpc\": \"2.0\",\n \"method\": \"subtract\",\n \"params\": [23, 42],\n \"id\": 2,\n }),\n ];\n\n // Create a buffer to write to\n let mut buffer = Vec::new();\n let mut writer = from_async_write(&mut buffer);\n\n // Write the test messages\n for message in test_messages.iter() {\n writer.send(message.clone()).await.unwrap();\n }\n writer.close().await.unwrap();\n drop(writer);\n // Parse the buffer back into lines and check each one\n let output = String::from_utf8_lossy(&buffer);\n let mut lines = output.lines();\n\n for expected_message in test_messages {\n let line = lines.next().unwrap();\n let parsed_message: serde_json::Value = serde_json::from_str(line).unwrap();\n assert_eq!(parsed_message, expected_message);\n }\n\n // Make sure there are no extra lines\n assert!(lines.next().is_none());\n }\n\n #[test]\n fn test_standard_notification_check() {\n // Test that all standard notifications are recognized\n assert!(is_standard_notification(\"notifications/cancelled\"));\n assert!(is_standard_notification(\"notifications/initialized\"));\n assert!(is_standard_notification(\"notifications/progress\"));\n assert!(is_standard_notification(\n \"notifications/resources/list_changed\"\n ));\n assert!(is_standard_notification(\"notifications/resources/updated\"));\n assert!(is_standard_notification(\n \"notifications/prompts/list_changed\"\n ));\n assert!(is_standard_notification(\"notifications/tools/list_changed\"));\n assert!(is_standard_notification(\"notifications/message\"));\n assert!(is_standard_notification(\"notifications/roots/list_changed\"));\n\n // Test that non-standard notifications are not recognized\n 
assert!(!is_standard_notification(\"notifications/stderr\"));\n assert!(!is_standard_notification(\"notifications/custom\"));\n assert!(!is_standard_notification(\"notifications/debug\"));\n assert!(!is_standard_notification(\"some/other/method\"));\n }\n\n #[test]\n fn test_compatibility_function() {\n // Test the compatibility function directly\n let stderr_message =\n r#\"{\"method\":\"notifications/stderr\",\"params\":{\"content\":\"stderr message\"}}\"#;\n let custom_message = r#\"{\"method\":\"notifications/custom\",\"params\":{\"data\":\"custom\"}}\"#;\n let standard_message =\n r#\"{\"method\":\"notifications/message\",\"params\":{\"level\":\"info\",\"data\":\"standard\"}}\"#;\n let progress_message = r#\"{\"method\":\"notifications/progress\",\"params\":{\"progressToken\":\"token\",\"progress\":50}}\"#;\n\n // Test with valid JSON - all should parse successfully\n let result1 =\n try_parse_with_compatibility::(stderr_message.as_bytes(), \"test\");\n let result2 =\n try_parse_with_compatibility::(custom_message.as_bytes(), \"test\");\n let result3 =\n try_parse_with_compatibility::(standard_message.as_bytes(), \"test\");\n let result4 =\n try_parse_with_compatibility::(progress_message.as_bytes(), \"test\");\n\n // All should parse successfully since they're valid JSON\n assert!(result1.is_ok());\n assert!(result2.is_ok());\n assert!(result3.is_ok());\n assert!(result4.is_ok());\n\n // Standard notifications should return Some(value)\n assert!(result3.unwrap().is_some());\n assert!(result4.unwrap().is_some());\n\n println!(\"Standard notifications are preserved, non-standard are handled gracefully\");\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport.rs", "//! # Transport\n//! The transport type must implemented [`Transport`] trait, which allow it send message concurrently and receive message sequentially.\n//!\n//! ## Standard Transport Types\n//! There are 3 pairs of standard transport types:\n//!\n//! | transport | client | server |\n//! 
|:-: |:-: |:-: |\n//! | std IO | [`child_process::TokioChildProcess`] | [`io::stdio`] |\n//! | streamable http | [`streamable_http_client::StreamableHttpClientTransport`] | [`streamable_http_server::StreamableHttpService`] |\n//! | sse | [`sse_client::SseClientTransport`] | [`sse_server::SseServer`] |\n//!\n//!## Helper Transport Types\n//! Thers are several helper transport types that can help you to create transport quickly.\n//!\n//! ### [Worker Transport](`worker::WorkerTransport`)\n//! Which allows you to run a worker and process messages in another tokio task.\n//!\n//! ### [Async Read/Write Transport](`async_rw::AsyncRwTransport`)\n//! You need to enable `transport-async-rw` feature to use this transport.\n//!\n//! This transport is used to create a transport from a byte stream which implemented [`tokio::io::AsyncRead`] and [`tokio::io::AsyncWrite`].\n//!\n//! This could be very helpful when you want to create a transport from a byte stream, such as a file or a tcp connection.\n//!\n//! ### [Sink/Stream Transport](`sink_stream::SinkStreamTransport`)\n//! This transport is used to create a transport from a sink and a stream.\n//!\n//! This could be very helpful when you want to create a transport from a duplex object stream, such as a websocket connection.\n//!\n//! ## [IntoTransport](`IntoTransport`) trait\n//! [`IntoTransport`] is a helper trait that implicitly convert a type into a transport type.\n//!\n//! ### These types is automatically implemented [`IntoTransport`] trait\n//! 1. A type that already implement both [`futures::Sink`] and [`futures::Stream`] trait, or a tuple `(Tx, Rx)` where `Tx` is [`futures::Sink`] and `Rx` is [`futures::Stream`].\n//! 2. A type that implement both [`tokio::io::AsyncRead`] and [`tokio::io::AsyncWrite`] trait. or a tuple `(R, W)` where `R` is [`tokio::io::AsyncRead`] and `W` is [`tokio::io::AsyncWrite`].\n//! 3. A type that implement [Worker](`worker::Worker`) trait.\n//! 4. 
A type that implement [`Transport`] trait.\n//!\n//! ## Examples\n//!\n//! ```rust\n//! # use rmcp::{\n//! # ServiceExt, serve_client, serve_server,\n//! # };\n//!\n//! // create transport from tcp stream\n//! async fn client() -> Result<(), Box> {\n//! let stream = tokio::net::TcpSocket::new_v4()?\n//! .connect(\"127.0.0.1:8001\".parse()?)\n//! .await?;\n//! let client = ().serve(stream).await?;\n//! let tools = client.peer().list_tools(Default::default()).await?;\n//! println!(\"{:?}\", tools);\n//! Ok(())\n//! }\n//!\n//! // create transport from std io\n//! async fn io() -> Result<(), Box> {\n//! let client = ().serve((tokio::io::stdin(), tokio::io::stdout())).await?;\n//! let tools = client.peer().list_tools(Default::default()).await?;\n//! println!(\"{:?}\", tools);\n//! Ok(())\n//! }\n//! ```\n\nuse std::{borrow::Cow, sync::Arc};\n\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\npub mod sink_stream;\n\n#[cfg(feature = \"transport-async-rw\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-async-rw\")))]\npub mod async_rw;\n\n#[cfg(feature = \"transport-worker\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-worker\")))]\npub mod worker;\n#[cfg(feature = \"transport-worker\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-worker\")))]\npub use worker::WorkerTransport;\n\n#[cfg(feature = \"transport-child-process\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-child-process\")))]\npub mod child_process;\n#[cfg(feature = \"transport-child-process\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-child-process\")))]\npub use child_process::{ConfigureCommandExt, TokioChildProcess};\n\n#[cfg(feature = \"transport-io\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-io\")))]\npub mod io;\n#[cfg(feature = \"transport-io\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-io\")))]\npub use io::stdio;\n\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = 
\"transport-sse-client\")))]\npub mod sse_client;\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\npub use sse_client::SseClientTransport;\n\n#[cfg(feature = \"transport-sse-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-server\")))]\npub mod sse_server;\n#[cfg(feature = \"transport-sse-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-server\")))]\npub use sse_server::SseServer;\n\n#[cfg(feature = \"auth\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\npub mod auth;\n#[cfg(feature = \"auth\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\npub use auth::{AuthError, AuthorizationManager, AuthorizationSession, AuthorizedHttpClient};\n\n// #[cfg(feature = \"transport-ws\")]\n// #[cfg_attr(docsrs, doc(cfg(feature = \"transport-ws\")))]\n// pub mod ws;\n#[cfg(feature = \"transport-streamable-http-server-session\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server-session\")))]\npub mod streamable_http_server;\n#[cfg(feature = \"transport-streamable-http-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server\")))]\npub use streamable_http_server::tower::{StreamableHttpServerConfig, StreamableHttpService};\n\n#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\npub mod streamable_http_client;\n#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\npub use streamable_http_client::StreamableHttpClientTransport;\n\n/// Common use codes\npub mod common;\n\npub trait Transport: Send\nwhere\n R: ServiceRole,\n{\n type Error: std::error::Error + Send + Sync + 'static;\n fn name() -> Cow<'static, str> {\n std::any::type_name::().into()\n }\n /// Send a message to the transport\n ///\n /// Notice that the future returned by this function should be `Send` and 
`'static`.\n /// It's because the sending message could be executed concurrently.\n ///\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static;\n\n /// Receive a message from the transport, this operation is sequential.\n fn receive(&mut self) -> impl Future>> + Send;\n\n /// Close the transport\n fn close(&mut self) -> impl Future> + Send;\n}\n\npub trait IntoTransport: Send + 'static\nwhere\n R: ServiceRole,\n E: std::error::Error + Send + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static;\n}\n\npub enum TransportAdapterIdentity {}\nimpl IntoTransport for T\nwhere\n T: Transport + Send + 'static,\n R: ServiceRole,\n E: std::error::Error + Send + Sync + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static {\n self\n }\n}\n\n/// A transport that can send a single message and then close itself\npub struct OneshotTransport\nwhere\n R: ServiceRole,\n{\n message: Option>,\n sender: tokio::sync::mpsc::Sender>,\n finished_signal: Arc,\n}\n\nimpl OneshotTransport\nwhere\n R: ServiceRole,\n{\n pub fn new(\n message: RxJsonRpcMessage,\n ) -> (Self, tokio::sync::mpsc::Receiver>) {\n let (sender, receiver) = tokio::sync::mpsc::channel(16);\n (\n Self {\n message: Some(message),\n sender,\n finished_signal: Arc::new(tokio::sync::Notify::new()),\n },\n receiver,\n )\n }\n}\n\nimpl Transport for OneshotTransport\nwhere\n R: ServiceRole,\n{\n type Error = tokio::sync::mpsc::error::SendError>;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let sender = self.sender.clone();\n let terminate = matches!(item, TxJsonRpcMessage::::Response(_));\n let signal = self.finished_signal.clone();\n async move {\n sender.send(item).await?;\n if terminate {\n signal.notify_waiters();\n }\n Ok(())\n }\n }\n\n async fn receive(&mut self) -> Option> {\n if self.message.is_none() {\n self.finished_signal.notified().await;\n }\n self.message.take()\n }\n\n fn close(&mut self) -> impl Future> + 
Send {\n self.message.take();\n std::future::ready(Ok(()))\n }\n}\n\n#[derive(Debug, thiserror::Error)]\n#[error(\"Transport [{transport_name}] error: {error}\")]\npub struct DynamicTransportError {\n pub transport_name: Cow<'static, str>,\n pub transport_type_id: std::any::TypeId,\n #[source]\n pub error: Box,\n}\n\nimpl DynamicTransportError {\n pub fn new + 'static, R: ServiceRole>(e: T::Error) -> Self {\n Self {\n transport_name: T::name(),\n transport_type_id: std::any::TypeId::of::(),\n error: Box::new(e),\n }\n }\n pub fn downcast + 'static, R: ServiceRole>(self) -> Result {\n if !self.is::() {\n Err(self)\n } else {\n Ok(self\n .error\n .downcast::()\n .map(|e| *e)\n .expect(\"type is checked\"))\n }\n }\n pub fn is + 'static, R: ServiceRole>(&self) -> bool {\n self.error.is::() && self.transport_type_id == std::any::TypeId::of::()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/reqwest/sse_client.rs", "use std::sync::Arc;\n\nuse futures::StreamExt;\nuse http::Uri;\nuse reqwest::header::ACCEPT;\nuse sse_stream::SseStream;\n\nuse crate::transport::{\n SseClientTransport,\n common::http_header::{EVENT_STREAM_MIME_TYPE, HEADER_LAST_EVENT_ID},\n sse_client::{SseClient, SseClientConfig, SseTransportError},\n};\n\nimpl SseClient for reqwest::Client {\n type Error = reqwest::Error;\n\n async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n auth_token: Option,\n ) -> Result<(), SseTransportError> {\n let mut request_builder = self.post(uri.to_string()).json(&message);\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n request_builder\n .send()\n .await\n .and_then(|resp| resp.error_for_status())\n .map_err(SseTransportError::from)\n .map(drop)\n }\n\n async fn get_stream(\n &self,\n uri: Uri,\n last_event_id: Option,\n auth_token: Option,\n ) -> Result<\n crate::transport::common::client_side_sse::BoxedSseResponse,\n SseTransportError,\n > {\n let mut 
request_builder = self\n .get(uri.to_string())\n .header(ACCEPT, EVENT_STREAM_MIME_TYPE);\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n if let Some(last_event_id) = last_event_id {\n request_builder = request_builder.header(HEADER_LAST_EVENT_ID, last_event_id);\n }\n let response = request_builder.send().await?;\n let response = response.error_for_status()?;\n match response.headers().get(reqwest::header::CONTENT_TYPE) {\n Some(ct) => {\n if !ct.as_bytes().starts_with(EVENT_STREAM_MIME_TYPE.as_bytes()) {\n return Err(SseTransportError::UnexpectedContentType(Some(ct.clone())));\n }\n }\n None => {\n return Err(SseTransportError::UnexpectedContentType(None));\n }\n }\n let event_stream = SseStream::from_byte_stream(response.bytes_stream()).boxed();\n Ok(event_stream)\n }\n}\n\nimpl SseClientTransport {\n pub async fn start(\n uri: impl Into>,\n ) -> Result> {\n SseClientTransport::start_with_client(\n reqwest::Client::default(),\n SseClientConfig {\n sse_endpoint: uri.into(),\n ..Default::default()\n },\n )\n .await\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/server_side_http.rs", "#![allow(dead_code)]\nuse std::{convert::Infallible, fmt::Display, sync::Arc, time::Duration};\n\nuse bytes::{Buf, Bytes};\nuse http::Response;\nuse http_body::Body;\nuse http_body_util::{BodyExt, Empty, Full, combinators::BoxBody};\nuse sse_stream::{KeepAlive, Sse, SseBody};\n\nuse super::http_header::EVENT_STREAM_MIME_TYPE;\nuse crate::model::{ClientJsonRpcMessage, ServerJsonRpcMessage};\n\npub type SessionId = Arc;\n\npub fn session_id() -> SessionId {\n uuid::Uuid::new_v4().to_string().into()\n}\n\npub const DEFAULT_AUTO_PING_INTERVAL: Duration = Duration::from_secs(15);\n\npub(crate) type BoxResponse = Response>;\n\npub(crate) fn accepted_response() -> Response> {\n Response::builder()\n .status(http::StatusCode::ACCEPTED)\n .body(Empty::new().boxed())\n .expect(\"valid 
response\")\n}\npin_project_lite::pin_project! {\n struct TokioTimer {\n #[pin]\n sleep: tokio::time::Sleep,\n }\n}\nimpl Future for TokioTimer {\n type Output = ();\n\n fn poll(\n self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll {\n let this = self.project();\n this.sleep.poll(cx)\n }\n}\nimpl sse_stream::Timer for TokioTimer {\n fn from_duration(duration: Duration) -> Self {\n Self {\n sleep: tokio::time::sleep(duration),\n }\n }\n\n fn reset(self: std::pin::Pin<&mut Self>, when: std::time::Instant) {\n let this = self.project();\n this.sleep.reset(tokio::time::Instant::from_std(when));\n }\n}\n\n#[derive(Debug, Clone)]\npub struct ServerSseMessage {\n pub event_id: Option,\n pub message: Arc,\n}\n\npub(crate) fn sse_stream_response(\n stream: impl futures::Stream + Send + Sync + 'static,\n keep_alive: Option,\n) -> Response> {\n use futures::StreamExt;\n let stream = SseBody::new(stream.map(|message| {\n let data = serde_json::to_string(&message.message).expect(\"valid message\");\n let mut sse = Sse::default().data(data);\n sse.id = message.event_id;\n Result::::Ok(sse)\n }));\n let stream = match keep_alive {\n Some(duration) => stream\n .with_keep_alive::(KeepAlive::new().interval(duration))\n .boxed(),\n None => stream.boxed(),\n };\n Response::builder()\n .status(http::StatusCode::OK)\n .header(http::header::CONTENT_TYPE, EVENT_STREAM_MIME_TYPE)\n .header(http::header::CACHE_CONTROL, \"no-cache\")\n .body(stream)\n .expect(\"valid response\")\n}\n\npub(crate) const fn internal_error_response(\n context: &str,\n) -> impl FnOnce(E) -> Response> {\n move |error| {\n tracing::error!(\"Internal server error when {context}: {error}\");\n Response::builder()\n .status(http::StatusCode::INTERNAL_SERVER_ERROR)\n .body(\n Full::new(Bytes::from(format!(\n \"Encounter an error when {context}: {error}\"\n )))\n .boxed(),\n )\n .expect(\"valid response\")\n }\n}\n\npub(crate) fn unexpected_message_response(expect: &str) -> 
Response> {\n Response::builder()\n .status(http::StatusCode::UNPROCESSABLE_ENTITY)\n .body(Full::new(Bytes::from(format!(\"Unexpected message, expect {expect}\"))).boxed())\n .expect(\"valid response\")\n}\n\npub(crate) async fn expect_json(\n body: B,\n) -> Result>>\nwhere\n B: Body + Send + 'static,\n B::Error: Display,\n{\n match body.collect().await {\n Ok(bytes) => {\n match serde_json::from_reader::<_, ClientJsonRpcMessage>(bytes.aggregate().reader()) {\n Ok(message) => Ok(message),\n Err(e) => {\n let response = Response::builder()\n .status(http::StatusCode::UNSUPPORTED_MEDIA_TYPE)\n .body(\n Full::new(Bytes::from(format!(\"fail to deserialize request body {e}\")))\n .boxed(),\n )\n .expect(\"valid response\");\n Err(response)\n }\n }\n }\n Err(e) => {\n let response = Response::builder()\n .status(http::StatusCode::INTERNAL_SERVER_ERROR)\n .body(Full::new(Bytes::from(format!(\"Failed to read request body: {e}\"))).boxed())\n .expect(\"valid response\");\n Err(response)\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/client_side_sse.rs", "use std::{\n pin::Pin,\n sync::Arc,\n task::{Poll, ready},\n time::Duration,\n};\n\nuse futures::{Stream, stream::BoxStream};\nuse sse_stream::{Error as SseError, Sse};\n\nuse crate::model::ServerJsonRpcMessage;\n\npub type BoxedSseResponse = BoxStream<'static, Result>;\n\npub trait SseRetryPolicy: std::fmt::Debug + Send + Sync {\n fn retry(&self, current_times: usize) -> Option;\n}\n\n#[derive(Debug, Clone)]\npub struct FixedInterval {\n pub max_times: Option,\n pub duration: Duration,\n}\n\nimpl SseRetryPolicy for FixedInterval {\n fn retry(&self, current_times: usize) -> Option {\n if let Some(max_times) = self.max_times {\n if current_times >= max_times {\n return None;\n }\n }\n Some(self.duration)\n }\n}\n\nimpl FixedInterval {\n pub const DEFAULT_MIN_DURATION: Duration = Duration::from_millis(1000);\n}\n\nimpl Default for FixedInterval {\n fn default() -> Self {\n Self {\n max_times: None,\n duration: 
Self::DEFAULT_MIN_DURATION,\n }\n }\n}\n\n#[derive(Debug, Clone)]\npub struct ExponentialBackoff {\n pub max_times: Option,\n pub base_duration: Duration,\n}\n\nimpl ExponentialBackoff {\n pub const DEFAULT_DURATION: Duration = Duration::from_millis(1000);\n}\n\nimpl Default for ExponentialBackoff {\n fn default() -> Self {\n Self {\n max_times: None,\n base_duration: Self::DEFAULT_DURATION,\n }\n }\n}\n\nimpl SseRetryPolicy for ExponentialBackoff {\n fn retry(&self, current_times: usize) -> Option {\n if let Some(max_times) = self.max_times {\n if current_times >= max_times {\n return None;\n }\n }\n Some(self.base_duration * (2u32.pow(current_times as u32)))\n }\n}\n\n#[derive(Debug, Clone, Copy, Default)]\npub struct NeverRetry;\n\nimpl SseRetryPolicy for NeverRetry {\n fn retry(&self, _current_times: usize) -> Option {\n None\n }\n}\n\n#[derive(Debug, Default)]\npub struct NeverReconnect {\n error: Option,\n}\n\nimpl SseStreamReconnect for NeverReconnect {\n type Error = E;\n type Future = futures::future::Ready>;\n fn retry_connection(&mut self, _last_event_id: Option<&str>) -> Self::Future {\n futures::future::ready(Err(self.error.take().expect(\"should not be called again\")))\n }\n}\n\npub(crate) trait SseStreamReconnect {\n type Error: std::error::Error;\n type Future: Future> + Send;\n fn retry_connection(&mut self, last_event_id: Option<&str>) -> Self::Future;\n}\n\npin_project_lite::pin_project! 
{\n pub(crate) struct SseAutoReconnectStream\n where R: SseStreamReconnect\n {\n retry_policy: Arc,\n last_event_id: Option,\n server_retry_interval: Option,\n connector: R,\n #[pin]\n state: SseAutoReconnectStreamState,\n }\n}\n\nimpl SseAutoReconnectStream {\n pub fn new(\n stream: BoxedSseResponse,\n connector: R,\n retry_policy: Arc,\n ) -> Self {\n Self {\n retry_policy,\n last_event_id: None,\n server_retry_interval: None,\n connector,\n state: SseAutoReconnectStreamState::Connected { stream },\n }\n }\n}\n\nimpl SseAutoReconnectStream> {\n #[allow(dead_code)]\n pub(crate) fn never_reconnect(stream: BoxedSseResponse, error_when_reconnect: E) -> Self {\n Self {\n retry_policy: Arc::new(NeverRetry),\n last_event_id: None,\n server_retry_interval: None,\n connector: NeverReconnect {\n error: Some(error_when_reconnect),\n },\n state: SseAutoReconnectStreamState::Connected { stream },\n }\n }\n}\n\npin_project_lite::pin_project! {\n #[project = SseAutoReconnectStreamStateProj]\n pub enum SseAutoReconnectStreamState {\n Connected {\n #[pin]\n stream: BoxedSseResponse,\n },\n Retrying {\n retry_times: usize,\n #[pin]\n retrying: F,\n },\n WaitingNextRetry {\n #[pin]\n sleep: tokio::time::Sleep,\n retry_times: usize,\n },\n Terminated,\n }\n}\n\nimpl Stream for SseAutoReconnectStream\nwhere\n R: SseStreamReconnect,\n{\n type Item = Result;\n fn poll_next(\n mut self: Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> Poll> {\n let mut this = self.as_mut().project();\n // let this_state = this.state.as_mut().project()\n let state = this.state.as_mut().project();\n let next_state = match state {\n SseAutoReconnectStreamStateProj::Connected { stream } => {\n match ready!(stream.poll_next(cx)) {\n Some(Ok(sse)) => {\n if let Some(new_server_retry) = sse.retry {\n *this.server_retry_interval =\n Some(Duration::from_millis(new_server_retry));\n }\n if let Some(event_id) = sse.id {\n *this.last_event_id = Some(event_id);\n }\n if let Some(data) = sse.data {\n match 
serde_json::from_str::(&data) {\n Err(e) => {\n // not sure should this be a hard error\n tracing::warn!(\"failed to deserialize server message: {e}\");\n return self.poll_next(cx);\n }\n Ok(message) => {\n return Poll::Ready(Some(Ok(message)));\n }\n };\n } else {\n return self.poll_next(cx);\n }\n }\n Some(Err(e)) => {\n tracing::warn!(\"sse stream error: {e}\");\n let retrying = this\n .connector\n .retry_connection(this.last_event_id.as_deref());\n SseAutoReconnectStreamState::Retrying {\n retry_times: 0,\n retrying,\n }\n }\n None => {\n tracing::debug!(\"sse stream terminated\");\n return Poll::Ready(None);\n }\n }\n }\n SseAutoReconnectStreamStateProj::Retrying {\n retry_times,\n retrying,\n } => {\n let retry_result = ready!(retrying.poll(cx));\n match retry_result {\n Ok(new_stream) => SseAutoReconnectStreamState::Connected { stream: new_stream },\n Err(e) => {\n tracing::debug!(\"retry sse stream error: {e}\");\n *retry_times += 1;\n if let Some(interval) = this.retry_policy.retry(*retry_times) {\n let interval = this\n .server_retry_interval\n .map(|server_retry_interval| server_retry_interval.max(interval))\n .unwrap_or(interval);\n let sleep = tokio::time::sleep(interval);\n SseAutoReconnectStreamState::WaitingNextRetry {\n sleep,\n retry_times: *retry_times,\n }\n } else {\n tracing::error!(\"sse stream error: {e}, max retry times reached\");\n this.state.set(SseAutoReconnectStreamState::Terminated);\n return Poll::Ready(Some(Err(e)));\n }\n }\n }\n }\n SseAutoReconnectStreamStateProj::WaitingNextRetry { sleep, retry_times } => {\n ready!(sleep.poll(cx));\n let retrying = this\n .connector\n .retry_connection(this.last_event_id.as_deref());\n let retry_times = *retry_times;\n SseAutoReconnectStreamState::Retrying {\n retry_times,\n retrying,\n }\n }\n SseAutoReconnectStreamStateProj::Terminated => {\n return Poll::Ready(None);\n }\n };\n // update the state\n this.state.set(next_state);\n self.poll_next(cx)\n }\n}\n"], 
["/rust-sdk/crates/rmcp-macros/src/tool.rs", "use darling::{FromMeta, ast::NestedMeta};\nuse proc_macro2::TokenStream;\nuse quote::{ToTokens, format_ident, quote};\nuse syn::{Expr, Ident, ImplItemFn, ReturnType};\n#[derive(FromMeta, Default, Debug)]\n#[darling(default)]\npub struct ToolAttribute {\n /// The name of the tool\n pub name: Option,\n pub description: Option,\n /// A JSON Schema object defining the expected parameters for the tool\n pub input_schema: Option,\n /// Optional additional tool information.\n pub annotations: Option,\n}\n\npub struct ResolvedToolAttribute {\n pub name: String,\n pub description: Option,\n pub input_schema: Expr,\n pub annotations: Expr,\n}\n\nimpl ResolvedToolAttribute {\n pub fn into_fn(self, fn_ident: Ident) -> syn::Result {\n let Self {\n name,\n description,\n input_schema,\n annotations,\n } = self;\n let description = if let Some(description) = description {\n quote! { Some(#description.into()) }\n } else {\n quote! { None }\n };\n let tokens = quote! 
{\n pub fn #fn_ident() -> rmcp::model::Tool {\n rmcp::model::Tool {\n name: #name.into(),\n description: #description,\n input_schema: #input_schema,\n annotations: #annotations,\n }\n }\n };\n syn::parse2::(tokens)\n }\n}\n\n#[derive(FromMeta, Debug, Default)]\n#[darling(default)]\npub struct ToolAnnotationsAttribute {\n /// A human-readable title for the tool.\n pub title: Option,\n\n /// If true, the tool does not modify its environment.\n ///\n /// Default: false\n pub read_only_hint: Option,\n\n /// If true, the tool may perform destructive updates to its environment.\n /// If false, the tool performs only additive updates.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: true\n /// A human-readable description of the tool's purpose.\n pub destructive_hint: Option,\n\n /// If true, calling the tool repeatedly with the same arguments\n /// will have no additional effect on the its environment.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: false.\n pub idempotent_hint: Option,\n\n /// If true, this tool may interact with an \"open world\" of external\n /// entities. If false, the tool's domain of interaction is closed.\n /// For example, the world of a web search tool is open, whereas that\n /// of a memory tool is not.\n ///\n /// Default: true\n pub open_world_hint: Option,\n}\n\nfn none_expr() -> Expr {\n syn::parse2::(quote! 
{ None }).unwrap()\n}\n\n// extract doc line from attribute\nfn extract_doc_line(existing_docs: Option, attr: &syn::Attribute) -> Option {\n if !attr.path().is_ident(\"doc\") {\n return None;\n }\n\n let syn::Meta::NameValue(name_value) = &attr.meta else {\n return None;\n };\n\n let syn::Expr::Lit(expr_lit) = &name_value.value else {\n return None;\n };\n\n let syn::Lit::Str(lit_str) = &expr_lit.lit else {\n return None;\n };\n\n let content = lit_str.value().trim().to_string();\n match (existing_docs, content) {\n (Some(mut existing_docs), content) if !content.is_empty() => {\n existing_docs.push('\\n');\n existing_docs.push_str(&content);\n Some(existing_docs)\n }\n (Some(existing_docs), _) => Some(existing_docs),\n (None, content) if !content.is_empty() => Some(content),\n _ => None,\n }\n}\n\npub fn tool(attr: TokenStream, input: TokenStream) -> syn::Result {\n let attribute = if attr.is_empty() {\n Default::default()\n } else {\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n ToolAttribute::from_list(&attr_args)?\n };\n let mut fn_item = syn::parse2::(input.clone())?;\n let fn_ident = &fn_item.sig.ident;\n\n let tool_attr_fn_ident = format_ident!(\"{}_tool_attr\", fn_ident);\n let input_schema_expr = if let Some(input_schema) = attribute.input_schema {\n input_schema\n } else {\n // try to find some parameters wrapper in the function\n let params_ty = fn_item.sig.inputs.iter().find_map(|input| {\n if let syn::FnArg::Typed(pat_type) = input {\n if let syn::Type::Path(type_path) = &*pat_type.ty {\n if type_path\n .path\n .segments\n .last()\n .is_some_and(|type_name| type_name.ident == \"Parameters\")\n {\n return Some(pat_type.ty.clone());\n }\n }\n }\n None\n });\n if let Some(params_ty) = params_ty {\n // if found, use the Parameters schema\n syn::parse2::(quote! {\n rmcp::handler::server::tool::cached_schema_for_type::<#params_ty>()\n })?\n } else {\n // if not found, use the default EmptyObject schema\n syn::parse2::(quote! 
{\n rmcp::handler::server::tool::cached_schema_for_type::()\n })?\n }\n };\n let annotations_expr = if let Some(annotations) = attribute.annotations {\n let ToolAnnotationsAttribute {\n title,\n read_only_hint,\n destructive_hint,\n idempotent_hint,\n open_world_hint,\n } = annotations;\n fn wrap_option(x: Option) -> TokenStream {\n x.map(|x| quote! {Some(#x.into())})\n .unwrap_or(quote! { None })\n }\n let title = wrap_option(title);\n let read_only_hint = wrap_option(read_only_hint);\n let destructive_hint = wrap_option(destructive_hint);\n let idempotent_hint = wrap_option(idempotent_hint);\n let open_world_hint = wrap_option(open_world_hint);\n let token_stream = quote! {\n Some(rmcp::model::ToolAnnotations {\n title: #title,\n read_only_hint: #read_only_hint,\n destructive_hint: #destructive_hint,\n idempotent_hint: #idempotent_hint,\n open_world_hint: #open_world_hint,\n })\n };\n syn::parse2::(token_stream)?\n } else {\n none_expr()\n };\n let resolved_tool_attr = ResolvedToolAttribute {\n name: attribute.name.unwrap_or_else(|| fn_ident.to_string()),\n description: attribute\n .description\n .or_else(|| fn_item.attrs.iter().fold(None, extract_doc_line)),\n input_schema: input_schema_expr,\n annotations: annotations_expr,\n };\n let tool_attr_fn = resolved_tool_attr.into_fn(tool_attr_fn_ident)?;\n // modify the the input function\n if fn_item.sig.asyncness.is_some() {\n // 1. remove asyncness from sig\n // 2. make return type: `std::pin::Pin + Send + '_>>`\n // 3. make body: { Box::pin(async move { #body }) }\n let new_output = syn::parse2::({\n let mut lt = quote! { 'static };\n if let Some(receiver) = fn_item.sig.receiver() {\n if let Some((_, receiver_lt)) = receiver.reference.as_ref() {\n if let Some(receiver_lt) = receiver_lt {\n lt = quote! { #receiver_lt };\n } else {\n lt = quote! { '_ };\n }\n }\n }\n match &fn_item.sig.output {\n syn::ReturnType::Default => {\n quote! 
{ -> std::pin::Pin + Send + #lt>> }\n }\n syn::ReturnType::Type(_, ty) => {\n quote! { -> std::pin::Pin + Send + #lt>> }\n }\n }\n })?;\n let prev_block = &fn_item.block;\n let new_block = syn::parse2::(quote! {\n { Box::pin(async move #prev_block ) }\n })?;\n fn_item.sig.asyncness = None;\n fn_item.sig.output = new_output;\n fn_item.block = new_block;\n }\n Ok(quote! {\n #tool_attr_fn\n #fn_item\n })\n}\n\n#[cfg(test)]\nmod test {\n use super::*;\n #[test]\n fn test_trait_tool_macro() -> syn::Result<()> {\n let attr = quote! {\n name = \"direct-annotated-tool\",\n annotations(title = \"Annotated Tool\", read_only_hint = true)\n };\n let input = quote! {\n async fn async_method(&self, Parameters(Request { fields }): Parameters) {\n drop(fields)\n }\n };\n let _input = tool(attr, input)?;\n\n Ok(())\n }\n\n #[test]\n fn test_doc_comment_description() -> syn::Result<()> {\n let attr = quote! {}; // No explicit description\n let input = quote! {\n /// This is a test description from doc comments\n /// with multiple lines\n fn test_function(&self) -> Result<(), Error> {\n Ok(())\n }\n };\n let result = tool(attr, input)?;\n\n // The output should contain the description from doc comments\n let result_str = result.to_string();\n assert!(result_str.contains(\"This is a test description from doc comments\"));\n assert!(result_str.contains(\"with multiple lines\"));\n\n Ok(())\n }\n\n #[test]\n fn test_explicit_description_priority() -> syn::Result<()> {\n let attr = quote! {\n description = \"Explicit description has priority\"\n };\n let input = quote! 
{\n /// Doc comment description that should be ignored\n fn test_function(&self) -> Result<(), Error> {\n Ok(())\n }\n };\n let result = tool(attr, input)?;\n\n // The output should contain the explicit description\n let result_str = result.to_string();\n assert!(result_str.contains(\"Explicit description has priority\"));\n Ok(())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/worker.rs", "use std::borrow::Cow;\n\nuse tokio_util::sync::CancellationToken;\nuse tracing::{Instrument, Level};\n\nuse super::{IntoTransport, Transport};\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\n#[derive(Debug, thiserror::Error)]\npub enum WorkerQuitReason {\n #[error(\"Join error {0}\")]\n Join(#[from] tokio::task::JoinError),\n #[error(\"Transport fatal {error}, when {context}\")]\n Fatal {\n error: Cow<'static, str>,\n context: Cow<'static, str>,\n },\n #[error(\"Transport canncelled\")]\n Cancelled,\n #[error(\"Transport closed\")]\n TransportClosed,\n #[error(\"Handler terminated\")]\n HandlerTerminated,\n}\n\nimpl WorkerQuitReason {\n pub fn fatal(msg: impl Into>, context: impl Into>) -> Self {\n Self::Fatal {\n error: msg.into(),\n context: context.into(),\n }\n }\n pub fn fatal_context(\n context: impl Into>,\n ) -> impl FnOnce(E) -> Self {\n |e| Self::Fatal {\n error: Cow::Owned(format!(\"{e}\")),\n context: context.into(),\n }\n }\n}\n\npub trait Worker: Sized + Send + 'static {\n type Error: std::error::Error + Send + Sync + 'static;\n type Role: ServiceRole;\n fn err_closed() -> Self::Error;\n fn err_join(e: tokio::task::JoinError) -> Self::Error;\n fn run(\n self,\n context: WorkerContext,\n ) -> impl Future> + Send;\n fn config(&self) -> WorkerConfig {\n WorkerConfig::default()\n }\n}\n\npub struct WorkerSendRequest {\n pub message: TxJsonRpcMessage,\n pub responder: tokio::sync::oneshot::Sender>,\n}\n\npub struct WorkerTransport {\n rx: tokio::sync::mpsc::Receiver>,\n send_service: tokio::sync::mpsc::Sender>,\n join_handle: Option>>,\n 
_drop_guard: tokio_util::sync::DropGuard,\n ct: CancellationToken,\n}\n\npub struct WorkerConfig {\n pub name: Option,\n pub channel_buffer_capacity: usize,\n}\n\nimpl Default for WorkerConfig {\n fn default() -> Self {\n Self {\n name: None,\n channel_buffer_capacity: 16,\n }\n }\n}\npub enum WorkerAdapter {}\n\nimpl IntoTransport for W {\n fn into_transport(self) -> impl Transport + 'static {\n WorkerTransport::spawn(self)\n }\n}\n\nimpl WorkerTransport {\n pub fn cancel_token(&self) -> CancellationToken {\n self.ct.clone()\n }\n pub fn spawn(worker: W) -> Self {\n Self::spawn_with_ct(worker, CancellationToken::new())\n }\n pub fn spawn_with_ct(worker: W, transport_task_ct: CancellationToken) -> Self {\n let config = worker.config();\n let worker_name = config.name;\n let (to_transport_tx, from_handler_rx) =\n tokio::sync::mpsc::channel::>(config.channel_buffer_capacity);\n let (to_handler_tx, from_transport_rx) =\n tokio::sync::mpsc::channel::>(config.channel_buffer_capacity);\n let context = WorkerContext {\n to_handler_tx,\n from_handler_rx,\n cancellation_token: transport_task_ct.clone(),\n };\n\n let join_handle = tokio::spawn(async move {\n worker\n .run(context)\n .instrument(tracing::span!(\n Level::TRACE,\n \"transport_worker\",\n name = worker_name,\n ))\n .await\n .inspect_err(|e| match e {\n WorkerQuitReason::Cancelled\n | WorkerQuitReason::TransportClosed\n | WorkerQuitReason::HandlerTerminated => {\n tracing::debug!(\"worker quit with reason: {:?}\", e);\n }\n WorkerQuitReason::Join(e) => {\n tracing::error!(\"worker quit with join error: {:?}\", e);\n }\n WorkerQuitReason::Fatal { error, context } => {\n tracing::error!(\"worker quit with fatal: {error}, when {context}\");\n }\n })\n .inspect(|_| {\n tracing::debug!(\"worker quit\");\n })\n });\n Self {\n rx: from_transport_rx,\n send_service: to_transport_tx,\n join_handle: Some(join_handle),\n ct: transport_task_ct.clone(),\n _drop_guard: transport_task_ct.drop_guard(),\n }\n }\n}\n\npub struct 
SendRequest {\n pub message: TxJsonRpcMessage,\n pub responder: tokio::sync::oneshot::Sender>,\n}\n\npub struct WorkerContext {\n pub to_handler_tx: tokio::sync::mpsc::Sender>,\n pub from_handler_rx: tokio::sync::mpsc::Receiver>,\n pub cancellation_token: CancellationToken,\n}\n\nimpl WorkerContext {\n pub async fn send_to_handler(\n &mut self,\n item: RxJsonRpcMessage,\n ) -> Result<(), WorkerQuitReason> {\n self.to_handler_tx\n .send(item)\n .await\n .map_err(|_| WorkerQuitReason::HandlerTerminated)\n }\n\n pub async fn recv_from_handler(&mut self) -> Result, WorkerQuitReason> {\n self.from_handler_rx\n .recv()\n .await\n .ok_or(WorkerQuitReason::HandlerTerminated)\n }\n}\n\nimpl Transport for WorkerTransport {\n type Error = W::Error;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let tx = self.send_service.clone();\n let (responder, receiver) = tokio::sync::oneshot::channel();\n let request = WorkerSendRequest {\n message: item,\n responder,\n };\n async move {\n tx.send(request).await.map_err(|_| W::err_closed())?;\n receiver.await.map_err(|_| W::err_closed())??;\n Ok(())\n }\n }\n async fn receive(&mut self) -> Option> {\n self.rx.recv().await\n }\n async fn close(&mut self) -> Result<(), Self::Error> {\n if let Some(handle) = self.join_handle.take() {\n self.ct.cancel();\n let _quit_reason = handle.await.map_err(W::err_join)?;\n Ok(())\n } else {\n Ok(())\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/tool.rs", "use std::{\n any::TypeId, borrow::Cow, collections::HashMap, future::Ready, marker::PhantomData, sync::Arc,\n};\n\nuse futures::future::{BoxFuture, FutureExt};\nuse schemars::{JsonSchema, transform::AddNullable};\nuse serde::{Deserialize, Serialize, de::DeserializeOwned};\nuse tokio_util::sync::CancellationToken;\n\npub use super::router::tool::{ToolRoute, ToolRouter};\nuse crate::{\n RoleServer,\n model::{CallToolRequestParam, CallToolResult, IntoContents, JsonObject},\n 
schemars::generate::SchemaSettings,\n service::RequestContext,\n};\n/// A shortcut for generating a JSON schema for a type.\npub fn schema_for_type() -> JsonObject {\n // explicitly to align json schema version to official specifications.\n // https://github.com/modelcontextprotocol/modelcontextprotocol/blob/main/schema/2025-03-26/schema.json\n // TODO: update to 2020-12 waiting for the mcp spec update\n let mut settings = SchemaSettings::draft07();\n settings.transforms = vec![Box::new(AddNullable::default())];\n let generator = settings.into_generator();\n let schema = generator.into_root_schema_for::();\n let object = serde_json::to_value(schema).expect(\"failed to serialize schema\");\n match object {\n serde_json::Value::Object(object) => object,\n _ => panic!(\"unexpected schema value\"),\n }\n}\n\n/// Call [`schema_for_type`] with a cache\npub fn cached_schema_for_type() -> Arc {\n thread_local! {\n static CACHE_FOR_TYPE: std::sync::RwLock>> = Default::default();\n };\n CACHE_FOR_TYPE.with(|cache| {\n if let Some(x) = cache\n .read()\n .expect(\"schema cache lock poisoned\")\n .get(&TypeId::of::())\n {\n x.clone()\n } else {\n let schema = schema_for_type::();\n let schema = Arc::new(schema);\n cache\n .write()\n .expect(\"schema cache lock poisoned\")\n .insert(TypeId::of::(), schema.clone());\n schema\n }\n })\n}\n\n/// Deserialize a JSON object into a type\npub fn parse_json_object(input: JsonObject) -> Result {\n serde_json::from_value(serde_json::Value::Object(input)).map_err(|e| {\n crate::ErrorData::invalid_params(\n format!(\"failed to deserialize parameters: {error}\", error = e),\n None,\n )\n })\n}\npub struct ToolCallContext<'s, S> {\n pub request_context: RequestContext,\n pub service: &'s S,\n pub name: Cow<'static, str>,\n pub arguments: Option,\n}\n\nimpl<'s, S> ToolCallContext<'s, S> {\n pub fn new(\n service: &'s S,\n CallToolRequestParam { name, arguments }: CallToolRequestParam,\n request_context: RequestContext,\n ) -> Self {\n Self {\n 
request_context,\n service,\n name,\n arguments,\n }\n }\n pub fn name(&self) -> &str {\n &self.name\n }\n pub fn request_context(&self) -> &RequestContext {\n &self.request_context\n }\n}\n\npub trait FromToolCallContextPart: Sized {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result;\n}\n\npub trait IntoCallToolResult {\n fn into_call_tool_result(self) -> Result;\n}\n\nimpl IntoCallToolResult for T {\n fn into_call_tool_result(self) -> Result {\n Ok(CallToolResult::success(self.into_contents()))\n }\n}\n\nimpl IntoCallToolResult for Result {\n fn into_call_tool_result(self) -> Result {\n match self {\n Ok(value) => Ok(CallToolResult::success(value.into_contents())),\n Err(error) => Ok(CallToolResult::error(error.into_contents())),\n }\n }\n}\n\nimpl IntoCallToolResult for Result {\n fn into_call_tool_result(self) -> Result {\n match self {\n Ok(value) => value.into_call_tool_result(),\n Err(error) => Err(error),\n }\n }\n}\n\npin_project_lite::pin_project! 
{\n #[project = IntoCallToolResultFutProj]\n pub enum IntoCallToolResultFut {\n Pending {\n #[pin]\n fut: F,\n _marker: PhantomData,\n },\n Ready {\n #[pin]\n result: Ready>,\n }\n }\n}\n\nimpl Future for IntoCallToolResultFut\nwhere\n F: Future,\n R: IntoCallToolResult,\n{\n type Output = Result;\n\n fn poll(\n self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll {\n match self.project() {\n IntoCallToolResultFutProj::Pending { fut, _marker } => {\n fut.poll(cx).map(IntoCallToolResult::into_call_tool_result)\n }\n IntoCallToolResultFutProj::Ready { result } => result.poll(cx),\n }\n }\n}\n\nimpl IntoCallToolResult for Result {\n fn into_call_tool_result(self) -> Result {\n self\n }\n}\n\npub trait CallToolHandler {\n fn call(\n self,\n context: ToolCallContext<'_, S>,\n ) -> BoxFuture<'_, Result>;\n}\n\npub type DynCallToolHandler = dyn for<'s> Fn(ToolCallContext<'s, S>) -> BoxFuture<'s, Result>\n + Send\n + Sync;\n\n/// Parameter Extractor\n///\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(transparent)]\npub struct Parameters

(pub P);\n\nimpl JsonSchema for Parameters

{\n fn schema_name() -> Cow<'static, str> {\n P::schema_name()\n }\n\n fn json_schema(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {\n P::json_schema(generator)\n }\n}\n\nimpl FromToolCallContextPart for CancellationToken {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n Ok(context.request_context.ct.clone())\n }\n}\n\npub struct ToolName(pub Cow<'static, str>);\n\nimpl FromToolCallContextPart for ToolName {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n Ok(Self(context.name.clone()))\n }\n}\n\nimpl FromToolCallContextPart for Parameters

\nwhere\n P: DeserializeOwned,\n{\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let arguments = context.arguments.take().unwrap_or_default();\n let value: P =\n serde_json::from_value(serde_json::Value::Object(arguments)).map_err(|e| {\n crate::ErrorData::invalid_params(\n format!(\"failed to deserialize parameters: {error}\", error = e),\n None,\n )\n })?;\n Ok(Parameters(value))\n }\n}\n\nimpl FromToolCallContextPart for JsonObject {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let object = context.arguments.take().unwrap_or_default();\n Ok(object)\n }\n}\n\nimpl FromToolCallContextPart for crate::model::Extensions {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let extensions = context.request_context.extensions.clone();\n Ok(extensions)\n }\n}\n\npub struct Extension(pub T);\n\nimpl FromToolCallContextPart for Extension\nwhere\n T: Send + Sync + 'static + Clone,\n{\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let extension = context\n .request_context\n .extensions\n .get::()\n .cloned()\n .ok_or_else(|| {\n crate::ErrorData::invalid_params(\n format!(\"missing extension {}\", std::any::type_name::()),\n None,\n )\n })?;\n Ok(Extension(extension))\n }\n}\n\nimpl FromToolCallContextPart for crate::Peer {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let peer = context.request_context.peer.clone();\n Ok(peer)\n }\n}\n\nimpl FromToolCallContextPart for crate::model::Meta {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let mut meta = crate::model::Meta::default();\n std::mem::swap(&mut meta, &mut context.request_context.meta);\n Ok(meta)\n }\n}\n\npub struct RequestId(pub crate::model::RequestId);\nimpl FromToolCallContextPart for RequestId {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n 
Ok(RequestId(context.request_context.id.clone()))\n }\n}\n\nimpl FromToolCallContextPart for RequestContext {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n Ok(context.request_context.clone())\n }\n}\n\nimpl<'s, S> ToolCallContext<'s, S> {\n pub fn invoke(self, h: H) -> BoxFuture<'s, Result>\n where\n H: CallToolHandler,\n {\n h.call(self)\n }\n}\n#[allow(clippy::type_complexity)]\npub struct AsyncAdapter(PhantomData fn(Fut) -> R>);\npub struct SyncAdapter(PhantomData R>);\n// #[allow(clippy::type_complexity)]\npub struct AsyncMethodAdapter(PhantomData R>);\npub struct SyncMethodAdapter(PhantomData R>);\n\nmacro_rules! impl_for {\n ($($T: ident)*) => {\n impl_for!([] [$($T)*]);\n };\n // finished\n ([$($Tn: ident)*] []) => {\n impl_for!(@impl $($Tn)*);\n };\n ([$($Tn: ident)*] [$Tn_1: ident $($Rest: ident)*]) => {\n impl_for!(@impl $($Tn)*);\n impl_for!([$($Tn)* $Tn_1] [$($Rest)*]);\n };\n (@impl $($Tn: ident)*) => {\n impl<$($Tn,)* S, F, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart ,\n )*\n F: FnOnce(&S, $($Tn,)*) -> BoxFuture<'_, R>,\n\n // Need RTN support here(I guess), https://github.com/rust-lang/rust/pull/138424\n // Fut: Future + Send + 'a,\n R: IntoCallToolResult + Send + 'static,\n S: Send + Sync + 'static,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext<'_, S>,\n ) -> BoxFuture<'_, Result>{\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n let service = context.service;\n let fut = self(service, $($Tn,)*);\n async move {\n let result = fut.await;\n result.into_call_tool_result()\n }.boxed()\n }\n }\n\n impl<$($Tn,)* S, F, Fut, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart ,\n )*\n F: FnOnce($($Tn,)*) -> Fut + Send + ,\n Fut: Future + Send + 'static,\n R: IntoCallToolResult + 
Send + 'static,\n S: Send + Sync,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext,\n ) -> BoxFuture<'static, Result>{\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n let fut = self($($Tn,)*);\n async move {\n let result = fut.await;\n result.into_call_tool_result()\n }.boxed()\n }\n }\n\n impl<$($Tn,)* S, F, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart + ,\n )*\n F: FnOnce(&S, $($Tn,)*) -> R + Send + ,\n R: IntoCallToolResult + Send + ,\n S: Send + Sync,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext,\n ) -> BoxFuture<'static, Result> {\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n std::future::ready(self(context.service, $($Tn,)*).into_call_tool_result()).boxed()\n }\n }\n\n impl<$($Tn,)* S, F, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart + ,\n )*\n F: FnOnce($($Tn,)*) -> R + Send + ,\n R: IntoCallToolResult + Send + ,\n S: Send + Sync,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext,\n ) -> BoxFuture<'static, Result> {\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n std::future::ready(self($($Tn,)*).into_call_tool_result()).boxed()\n }\n }\n };\n}\nimpl_for!(T0 T1 T2 T3 T4 T5 T6 T7 T8 T9 T10 T11 T12 T13 T14 T15);\n"], ["/rust-sdk/crates/rmcp/src/model/extension.rs", "//! A container for those extra data could be carried on request or notification.\n//!\n//! 
This file is copied and modified from crate [http](https://github.com/hyperium/http).\n//!\n//! - Original code license: \n//! - Original code: \nuse std::{\n any::{Any, TypeId},\n collections::HashMap,\n fmt,\n hash::{BuildHasherDefault, Hasher},\n};\n\ntype AnyMap = HashMap, BuildHasherDefault>;\n\n// With TypeIds as keys, there's no need to hash them. They are already hashes\n// themselves, coming from the compiler. The IdHasher just holds the u64 of\n// the TypeId, and then returns it, instead of doing any bit fiddling.\n#[derive(Default)]\nstruct IdHasher(u64);\n\nimpl Hasher for IdHasher {\n fn write(&mut self, _: &[u8]) {\n unreachable!(\"TypeId calls write_u64\");\n }\n\n #[inline]\n fn write_u64(&mut self, id: u64) {\n self.0 = id;\n }\n\n #[inline]\n fn finish(&self) -> u64 {\n self.0\n }\n}\n\n/// A type map of protocol extensions.\n///\n/// `Extensions` can be used by `Request` `Notification` and `Response` to store\n/// extra data derived from the underlying protocol.\n#[derive(Clone, Default)]\npub struct Extensions {\n // If extensions are never used, no need to carry around an empty HashMap.\n // That's 3 words. 
Instead, this is only 1 word.\n map: Option>,\n}\n\nimpl Extensions {\n /// Create an empty `Extensions`.\n #[inline]\n pub const fn new() -> Extensions {\n Extensions { map: None }\n }\n\n /// Insert a type into this `Extensions`.\n ///\n /// If a extension of this type already existed, it will\n /// be returned and replaced with the new one.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert!(ext.insert(5i32).is_none());\n /// assert!(ext.insert(4u8).is_none());\n /// assert_eq!(ext.insert(9i32), Some(5i32));\n /// ```\n pub fn insert(&mut self, val: T) -> Option {\n self.map\n .get_or_insert_with(Box::default)\n .insert(TypeId::of::(), Box::new(val))\n .and_then(|boxed| boxed.into_any().downcast().ok().map(|boxed| *boxed))\n }\n\n /// Get a reference to a type previously inserted on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert!(ext.get::().is_none());\n /// ext.insert(5i32);\n ///\n /// assert_eq!(ext.get::(), Some(&5i32));\n /// ```\n pub fn get(&self) -> Option<&T> {\n self.map\n .as_ref()\n .and_then(|map| map.get(&TypeId::of::()))\n .and_then(|boxed| (**boxed).as_any().downcast_ref())\n }\n\n /// Get a mutable reference to a type previously inserted on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// ext.insert(String::from(\"Hello\"));\n /// ext.get_mut::().unwrap().push_str(\" World\");\n ///\n /// assert_eq!(ext.get::().unwrap(), \"Hello World\");\n /// ```\n pub fn get_mut(&mut self) -> Option<&mut T> {\n self.map\n .as_mut()\n .and_then(|map| map.get_mut(&TypeId::of::()))\n .and_then(|boxed| (**boxed).as_any_mut().downcast_mut())\n }\n\n /// Get a mutable reference to a type, inserting `value` if not already present on this\n /// `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # 
use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// *ext.get_or_insert(1i32) += 2;\n ///\n /// assert_eq!(*ext.get::().unwrap(), 3);\n /// ```\n pub fn get_or_insert(&mut self, value: T) -> &mut T {\n self.get_or_insert_with(|| value)\n }\n\n /// Get a mutable reference to a type, inserting the value created by `f` if not already present\n /// on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// *ext.get_or_insert_with(|| 1i32) += 2;\n ///\n /// assert_eq!(*ext.get::().unwrap(), 3);\n /// ```\n pub fn get_or_insert_with T>(\n &mut self,\n f: F,\n ) -> &mut T {\n let out = self\n .map\n .get_or_insert_with(Box::default)\n .entry(TypeId::of::())\n .or_insert_with(|| Box::new(f()));\n (**out).as_any_mut().downcast_mut().unwrap()\n }\n\n /// Get a mutable reference to a type, inserting the type's default value if not already present\n /// on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// *ext.get_or_insert_default::() += 2;\n ///\n /// assert_eq!(*ext.get::().unwrap(), 2);\n /// ```\n pub fn get_or_insert_default(&mut self) -> &mut T {\n self.get_or_insert_with(T::default)\n }\n\n /// Remove a type from this `Extensions`.\n ///\n /// If a extension of this type existed, it will be returned.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// ext.insert(5i32);\n /// assert_eq!(ext.remove::(), Some(5i32));\n /// assert!(ext.get::().is_none());\n /// ```\n pub fn remove(&mut self) -> Option {\n self.map\n .as_mut()\n .and_then(|map| map.remove(&TypeId::of::()))\n .and_then(|boxed| boxed.into_any().downcast().ok().map(|boxed| *boxed))\n }\n\n /// Clear the `Extensions` of all inserted extensions.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = 
Extensions::new();\n /// ext.insert(5i32);\n /// ext.clear();\n ///\n /// assert!(ext.get::().is_none());\n /// ```\n #[inline]\n pub fn clear(&mut self) {\n if let Some(ref mut map) = self.map {\n map.clear();\n }\n }\n\n /// Check whether the extension set is empty or not.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert!(ext.is_empty());\n /// ext.insert(5i32);\n /// assert!(!ext.is_empty());\n /// ```\n #[inline]\n pub fn is_empty(&self) -> bool {\n self.map.as_ref().is_none_or(|map| map.is_empty())\n }\n\n /// Get the number of extensions available.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert_eq!(ext.len(), 0);\n /// ext.insert(5i32);\n /// assert_eq!(ext.len(), 1);\n /// ```\n #[inline]\n pub fn len(&self) -> usize {\n self.map.as_ref().map_or(0, |map| map.len())\n }\n\n /// Extends `self` with another `Extensions`.\n ///\n /// If an instance of a specific type exists in both, the one in `self` is overwritten with the\n /// one from `other`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext_a = Extensions::new();\n /// ext_a.insert(8u8);\n /// ext_a.insert(16u16);\n ///\n /// let mut ext_b = Extensions::new();\n /// ext_b.insert(4u8);\n /// ext_b.insert(\"hello\");\n ///\n /// ext_a.extend(ext_b);\n /// assert_eq!(ext_a.len(), 3);\n /// assert_eq!(ext_a.get::(), Some(&4u8));\n /// assert_eq!(ext_a.get::(), Some(&16u16));\n /// assert_eq!(ext_a.get::<&'static str>().copied(), Some(\"hello\"));\n /// ```\n pub fn extend(&mut self, other: Self) {\n if let Some(other) = other.map {\n if let Some(map) = &mut self.map {\n map.extend(*other);\n } else {\n self.map = Some(other);\n }\n }\n }\n}\n\nimpl fmt::Debug for Extensions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n f.debug_struct(\"Extensions\").finish()\n }\n}\n\ntrait AnyClone: Any {\n fn 
clone_box(&self) -> Box;\n fn as_any(&self) -> &dyn Any;\n fn as_any_mut(&mut self) -> &mut dyn Any;\n fn into_any(self: Box) -> Box;\n}\n\nimpl AnyClone for T {\n fn clone_box(&self) -> Box {\n Box::new(self.clone())\n }\n\n fn as_any(&self) -> &dyn Any {\n self\n }\n\n fn as_any_mut(&mut self) -> &mut dyn Any {\n self\n }\n\n fn into_any(self: Box) -> Box {\n self\n }\n}\n\nimpl Clone for Box {\n fn clone(&self) -> Self {\n (**self).clone_box()\n }\n}\n\n#[test]\nfn test_extensions() {\n #[derive(Clone, Debug, PartialEq)]\n struct MyType(i32);\n\n let mut extensions = Extensions::new();\n\n extensions.insert(5i32);\n extensions.insert(MyType(10));\n\n assert_eq!(extensions.get(), Some(&5i32));\n assert_eq!(extensions.get_mut(), Some(&mut 5i32));\n\n let ext2 = extensions.clone();\n\n assert_eq!(extensions.remove::(), Some(5i32));\n assert!(extensions.get::().is_none());\n\n // clone still has it\n assert_eq!(ext2.get(), Some(&5i32));\n assert_eq!(ext2.get(), Some(&MyType(10)));\n\n assert_eq!(extensions.get::(), None);\n assert_eq!(extensions.get(), Some(&MyType(10)));\n}\n"], ["/rust-sdk/crates/rmcp/src/model/content.rs", "//! Content sent around agents, extensions, and LLMs\n//! The various content types can be display to humans but also understood by models\n//! 
They include optional annotations used to help inform agent usage\nuse serde::{Deserialize, Serialize};\nuse serde_json::json;\n\nuse super::{AnnotateAble, Annotated, resource::ResourceContents};\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawTextContent {\n pub text: String,\n}\npub type TextContent = Annotated;\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawImageContent {\n /// The base64-encoded image\n pub data: String,\n pub mime_type: String,\n}\n\npub type ImageContent = Annotated;\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawEmbeddedResource {\n pub resource: ResourceContents,\n}\npub type EmbeddedResource = Annotated;\n\nimpl EmbeddedResource {\n pub fn get_text(&self) -> String {\n match &self.resource {\n ResourceContents::TextResourceContents { text, .. 
} => text.clone(),\n _ => String::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawAudioContent {\n pub data: String,\n pub mime_type: String,\n}\n\npub type AudioContent = Annotated;\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(tag = \"type\", rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum RawContent {\n Text(RawTextContent),\n Image(RawImageContent),\n Resource(RawEmbeddedResource),\n Audio(AudioContent),\n}\n\npub type Content = Annotated;\n\nimpl RawContent {\n pub fn json(json: S) -> Result {\n let json = serde_json::to_string(&json).map_err(|e| {\n crate::ErrorData::internal_error(\n \"fail to serialize response to json\",\n Some(json!(\n {\"reason\": e.to_string()}\n )),\n )\n })?;\n Ok(RawContent::text(json))\n }\n\n pub fn text>(text: S) -> Self {\n RawContent::Text(RawTextContent { text: text.into() })\n }\n\n pub fn image, T: Into>(data: S, mime_type: T) -> Self {\n RawContent::Image(RawImageContent {\n data: data.into(),\n mime_type: mime_type.into(),\n })\n }\n\n pub fn resource(resource: ResourceContents) -> Self {\n RawContent::Resource(RawEmbeddedResource { resource })\n }\n\n pub fn embedded_text, T: Into>(uri: S, content: T) -> Self {\n RawContent::Resource(RawEmbeddedResource {\n resource: ResourceContents::TextResourceContents {\n uri: uri.into(),\n mime_type: Some(\"text\".to_string()),\n text: content.into(),\n },\n })\n }\n\n /// Get the text content if this is a TextContent variant\n pub fn as_text(&self) -> Option<&RawTextContent> {\n match self {\n RawContent::Text(text) => Some(text),\n _ => None,\n }\n }\n\n /// Get the image content if this is an ImageContent variant\n pub fn as_image(&self) -> Option<&RawImageContent> {\n match self {\n RawContent::Image(image) => Some(image),\n _ => None,\n }\n }\n\n 
/// Get the resource content if this is an ImageContent variant\n pub fn as_resource(&self) -> Option<&RawEmbeddedResource> {\n match self {\n RawContent::Resource(resource) => Some(resource),\n _ => None,\n }\n }\n}\n\nimpl Content {\n pub fn text>(text: S) -> Self {\n RawContent::text(text).no_annotation()\n }\n\n pub fn image, T: Into>(data: S, mime_type: T) -> Self {\n RawContent::image(data, mime_type).no_annotation()\n }\n\n pub fn resource(resource: ResourceContents) -> Self {\n RawContent::resource(resource).no_annotation()\n }\n\n pub fn embedded_text, T: Into>(uri: S, content: T) -> Self {\n RawContent::embedded_text(uri, content).no_annotation()\n }\n\n pub fn json(json: S) -> Result {\n RawContent::json(json).map(|c| c.no_annotation())\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\npub struct JsonContent(S);\n/// Types that can be converted into a list of contents\npub trait IntoContents {\n fn into_contents(self) -> Vec;\n}\n\nimpl IntoContents for Content {\n fn into_contents(self) -> Vec {\n vec![self]\n }\n}\n\nimpl IntoContents for String {\n fn into_contents(self) -> Vec {\n vec![Content::text(self)]\n }\n}\n\nimpl IntoContents for () {\n fn into_contents(self) -> Vec {\n vec![]\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/auth/streamable_http_client.rs", "use crate::transport::{\n auth::AuthClient,\n streamable_http_client::{StreamableHttpClient, StreamableHttpError},\n};\nimpl StreamableHttpClient for AuthClient\nwhere\n C: StreamableHttpClient + Send + Sync,\n{\n type Error = StreamableHttpError;\n\n async fn delete_session(\n &self,\n uri: std::sync::Arc,\n session_id: std::sync::Arc,\n mut auth_token: Option,\n ) -> Result<(), crate::transport::streamable_http_client::StreamableHttpError>\n {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .delete_session(uri, session_id, auth_token)\n .await\n .map_err(StreamableHttpError::Client)\n }\n\n async fn 
get_stream(\n &self,\n uri: std::sync::Arc,\n session_id: std::sync::Arc,\n last_event_id: Option,\n mut auth_token: Option,\n ) -> Result<\n futures::stream::BoxStream<'static, Result>,\n crate::transport::streamable_http_client::StreamableHttpError,\n > {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .get_stream(uri, session_id, last_event_id, auth_token)\n .await\n .map_err(StreamableHttpError::Client)\n }\n\n async fn post_message(\n &self,\n uri: std::sync::Arc,\n message: crate::model::ClientJsonRpcMessage,\n session_id: Option>,\n mut auth_token: Option,\n ) -> Result<\n crate::transport::streamable_http_client::StreamableHttpPostResponse,\n StreamableHttpError,\n > {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .post_message(uri, message, session_id, auth_token)\n .await\n .map_err(StreamableHttpError::Client)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/client/progress.rs", "use std::{collections::HashMap, sync::Arc};\n\nuse futures::{Stream, StreamExt};\nuse tokio::sync::RwLock;\nuse tokio_stream::wrappers::ReceiverStream;\n\nuse crate::model::{ProgressNotificationParam, ProgressToken};\ntype Dispatcher =\n Arc>>>;\n\n/// A dispatcher for progress notifications.\n#[derive(Debug, Clone, Default)]\npub struct ProgressDispatcher {\n pub(crate) dispatcher: Dispatcher,\n}\n\nimpl ProgressDispatcher {\n const CHANNEL_SIZE: usize = 16;\n pub fn new() -> Self {\n Self::default()\n }\n\n /// Handle a progress notification by sending it to the appropriate subscriber\n pub async fn handle_notification(&self, notification: ProgressNotificationParam) {\n let token = ¬ification.progress_token;\n if let Some(sender) = self.dispatcher.read().await.get(token).cloned() {\n let send_result = sender.send(notification).await;\n if let Err(e) = send_result {\n tracing::warn!(\"Failed to send progress notification: {e}\");\n }\n }\n }\n\n /// Subscribe to 
progress notifications for a specific token.\n ///\n /// If you drop the returned `ProgressSubscriber`, it will automatically unsubscribe from notifications for that token.\n pub async fn subscribe(&self, progress_token: ProgressToken) -> ProgressSubscriber {\n let (sender, receiver) = tokio::sync::mpsc::channel(Self::CHANNEL_SIZE);\n self.dispatcher\n .write()\n .await\n .insert(progress_token.clone(), sender);\n let receiver = ReceiverStream::new(receiver);\n ProgressSubscriber {\n progress_token,\n receiver,\n dispacher: self.dispatcher.clone(),\n }\n }\n\n /// Unsubscribe from progress notifications for a specific token.\n pub async fn unsubscribe(&self, token: &ProgressToken) {\n self.dispatcher.write().await.remove(token);\n }\n\n /// Clear all dispachter.\n pub async fn clear(&self) {\n let mut dispacher = self.dispatcher.write().await;\n dispacher.clear();\n }\n}\n\npub struct ProgressSubscriber {\n pub(crate) progress_token: ProgressToken,\n pub(crate) receiver: ReceiverStream,\n pub(crate) dispacher: Dispatcher,\n}\n\nimpl ProgressSubscriber {\n pub fn progress_token(&self) -> &ProgressToken {\n &self.progress_token\n }\n}\n\nimpl Stream for ProgressSubscriber {\n type Item = ProgressNotificationParam;\n\n fn poll_next(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.receiver.poll_next_unpin(cx)\n }\n\n fn size_hint(&self) -> (usize, Option) {\n self.receiver.size_hint()\n }\n}\n\nimpl Drop for ProgressSubscriber {\n fn drop(&mut self) {\n let token = self.progress_token.clone();\n self.receiver.close();\n let dispatcher = self.dispacher.clone();\n tokio::spawn(async move {\n let mut dispacher = dispatcher.write_owned().await;\n dispacher.remove(&token);\n });\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/meta.rs", "use std::ops::{Deref, DerefMut};\n\nuse serde::{Deserialize, Serialize};\nuse serde_json::Value;\n\nuse super::{\n ClientNotification, ClientRequest, Extensions, JsonObject, 
JsonRpcMessage, NumberOrString,\n ProgressToken, ServerNotification, ServerRequest,\n};\n\npub trait GetMeta {\n fn get_meta_mut(&mut self) -> &mut Meta;\n fn get_meta(&self) -> &Meta;\n}\n\npub trait GetExtensions {\n fn extensions(&self) -> &Extensions;\n fn extensions_mut(&mut self) -> &mut Extensions;\n}\n\nmacro_rules! variant_extension {\n (\n $Enum: ident {\n $($variant: ident)*\n }\n ) => {\n impl GetExtensions for $Enum {\n fn extensions(&self) -> &Extensions {\n match self {\n $(\n $Enum::$variant(v) => &v.extensions,\n )*\n }\n }\n fn extensions_mut(&mut self) -> &mut Extensions {\n match self {\n $(\n $Enum::$variant(v) => &mut v.extensions,\n )*\n }\n }\n }\n impl GetMeta for $Enum {\n fn get_meta_mut(&mut self) -> &mut Meta {\n self.extensions_mut().get_or_insert_default()\n }\n fn get_meta(&self) -> &Meta {\n self.extensions().get::().unwrap_or(Meta::static_empty())\n }\n }\n };\n}\n\nvariant_extension! {\n ClientRequest {\n PingRequest\n InitializeRequest\n CompleteRequest\n SetLevelRequest\n GetPromptRequest\n ListPromptsRequest\n ListResourcesRequest\n ListResourceTemplatesRequest\n ReadResourceRequest\n SubscribeRequest\n UnsubscribeRequest\n CallToolRequest\n ListToolsRequest\n }\n}\n\nvariant_extension! {\n ServerRequest {\n PingRequest\n CreateMessageRequest\n ListRootsRequest\n }\n}\n\nvariant_extension! {\n ClientNotification {\n CancelledNotification\n ProgressNotification\n InitializedNotification\n RootsListChangedNotification\n }\n}\n\nvariant_extension! 
{\n ServerNotification {\n CancelledNotification\n ProgressNotification\n LoggingMessageNotification\n ResourceUpdatedNotification\n ResourceListChangedNotification\n ToolListChangedNotification\n PromptListChangedNotification\n }\n}\n#[derive(Debug, Serialize, Deserialize, Clone, Default)]\n#[serde(transparent)]\npub struct Meta(pub JsonObject);\nconst PROGRESS_TOKEN_FIELD: &str = \"progressToken\";\nimpl Meta {\n pub fn new() -> Self {\n Self(JsonObject::new())\n }\n\n pub(crate) fn static_empty() -> &'static Self {\n static EMPTY: std::sync::OnceLock = std::sync::OnceLock::new();\n EMPTY.get_or_init(Default::default)\n }\n\n pub fn get_progress_token(&self) -> Option {\n self.0.get(PROGRESS_TOKEN_FIELD).and_then(|v| match v {\n Value::String(s) => Some(ProgressToken(NumberOrString::String(s.to_string().into()))),\n Value::Number(n) => n\n .as_u64()\n .map(|n| ProgressToken(NumberOrString::Number(n as u32))),\n _ => None,\n })\n }\n\n pub fn set_progress_token(&mut self, token: ProgressToken) {\n match token.0 {\n NumberOrString::String(ref s) => self.0.insert(\n PROGRESS_TOKEN_FIELD.to_string(),\n Value::String(s.to_string()),\n ),\n NumberOrString::Number(n) => self\n .0\n .insert(PROGRESS_TOKEN_FIELD.to_string(), Value::Number(n.into())),\n };\n }\n\n pub fn extend(&mut self, other: Meta) {\n for (k, v) in other.0.into_iter() {\n self.0.insert(k, v);\n }\n }\n}\n\nimpl Deref for Meta {\n type Target = JsonObject;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl DerefMut for Meta {\n fn deref_mut(&mut self) -> &mut Self::Target {\n &mut self.0\n }\n}\n\nimpl JsonRpcMessage\nwhere\n Req: GetExtensions,\n Noti: GetExtensions,\n{\n pub fn insert_extension(&mut self, value: T) {\n match self {\n JsonRpcMessage::Request(json_rpc_request) => {\n json_rpc_request.request.extensions_mut().insert(value);\n }\n JsonRpcMessage::Notification(json_rpc_notification) => {\n json_rpc_notification\n .notification\n .extensions_mut()\n .insert(value);\n }\n 
JsonRpcMessage::BatchRequest(json_rpc_batch_request_items) => {\n for item in json_rpc_batch_request_items {\n match item {\n super::JsonRpcBatchRequestItem::Request(json_rpc_request) => {\n json_rpc_request\n .request\n .extensions_mut()\n .insert(value.clone());\n }\n super::JsonRpcBatchRequestItem::Notification(json_rpc_notification) => {\n json_rpc_notification\n .notification\n .extensions_mut()\n .insert(value.clone());\n }\n }\n }\n }\n _ => {}\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/child_process.rs", "use std::process::Stdio;\n\nuse process_wrap::tokio::{TokioChildWrapper, TokioCommandWrap};\nuse tokio::{\n io::AsyncRead,\n process::{ChildStderr, ChildStdin, ChildStdout},\n};\n\nuse super::{IntoTransport, Transport};\nuse crate::service::ServiceRole;\n\n/// The parts of a child process.\ntype ChildProcessParts = (\n Box,\n ChildStdout,\n ChildStdin,\n Option,\n);\n\n/// Extract the stdio handles from a spawned child.\n/// Returns `(child, stdout, stdin, stderr)` where `stderr` is `Some` only\n/// if the process was spawned with `Stdio::piped()`.\n#[inline]\nfn child_process(mut child: Box) -> std::io::Result {\n let child_stdin = match child.inner_mut().stdin().take() {\n Some(stdin) => stdin,\n None => return Err(std::io::Error::other(\"stdin was already taken\")),\n };\n let child_stdout = match child.inner_mut().stdout().take() {\n Some(stdout) => stdout,\n None => return Err(std::io::Error::other(\"stdout was already taken\")),\n };\n let child_stderr = child.inner_mut().stderr().take();\n Ok((child, child_stdout, child_stdin, child_stderr))\n}\n\npub struct TokioChildProcess {\n child: ChildWithCleanup,\n child_stdin: ChildStdin,\n child_stdout: ChildStdout,\n}\n\npub struct ChildWithCleanup {\n inner: Box,\n}\n\nimpl Drop for ChildWithCleanup {\n fn drop(&mut self) {\n if let Err(e) = self.inner.start_kill() {\n tracing::warn!(\"Failed to kill child process: {e}\");\n }\n }\n}\n\n// we hold the child process with stdout, for it's easier 
to implement AsyncRead\npin_project_lite::pin_project! {\n pub struct TokioChildProcessOut {\n child: ChildWithCleanup,\n #[pin]\n child_stdout: ChildStdout,\n }\n}\n\nimpl TokioChildProcessOut {\n /// Get the process ID of the child process.\n pub fn id(&self) -> Option {\n self.child.inner.id()\n }\n}\n\nimpl AsyncRead for TokioChildProcessOut {\n fn poll_read(\n self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n buf: &mut tokio::io::ReadBuf<'_>,\n ) -> std::task::Poll> {\n self.project().child_stdout.poll_read(cx, buf)\n }\n}\n\nimpl TokioChildProcess {\n /// Convenience: spawn with default `piped` stdio\n pub fn new(command: impl Into) -> std::io::Result {\n let (proc, _ignored) = TokioChildProcessBuilder::new(command).spawn()?;\n Ok(proc)\n }\n\n /// Builder entry-point allowing fine-grained stdio control.\n pub fn builder(command: impl Into) -> TokioChildProcessBuilder {\n TokioChildProcessBuilder::new(command)\n }\n\n /// Get the process ID of the child process.\n pub fn id(&self) -> Option {\n self.child.inner.id()\n }\n\n /// Split this helper into a reader (stdout) and writer (stdin).\n pub fn split(self) -> (TokioChildProcessOut, ChildStdin) {\n let TokioChildProcess {\n child,\n child_stdin,\n child_stdout,\n } = self;\n (\n TokioChildProcessOut {\n child,\n child_stdout,\n },\n child_stdin,\n )\n }\n}\n\n/// Builder for `TokioChildProcess` allowing custom `Stdio` configuration.\npub struct TokioChildProcessBuilder {\n cmd: TokioCommandWrap,\n stdin: Stdio,\n stdout: Stdio,\n stderr: Stdio,\n}\n\nimpl TokioChildProcessBuilder {\n fn new(cmd: impl Into) -> Self {\n Self {\n cmd: cmd.into(),\n stdin: Stdio::piped(),\n stdout: Stdio::piped(),\n stderr: Stdio::inherit(),\n }\n }\n\n /// Override the child stdin configuration.\n pub fn stdin(mut self, io: impl Into) -> Self {\n self.stdin = io.into();\n self\n }\n /// Override the child stdout configuration.\n pub fn stdout(mut self, io: impl Into) -> Self {\n self.stdout = io.into();\n 
self\n }\n /// Override the child stderr configuration.\n pub fn stderr(mut self, io: impl Into) -> Self {\n self.stderr = io.into();\n self\n }\n\n /// Spawn the child process. Returns the transport plus an optional captured stderr handle.\n pub fn spawn(mut self) -> std::io::Result<(TokioChildProcess, Option)> {\n self.cmd\n .command_mut()\n .stdin(self.stdin)\n .stdout(self.stdout)\n .stderr(self.stderr);\n\n let (child, stdout, stdin, stderr_opt) = child_process(self.cmd.spawn()?)?;\n\n let proc = TokioChildProcess {\n child: ChildWithCleanup { inner: child },\n child_stdin: stdin,\n child_stdout: stdout,\n };\n Ok((proc, stderr_opt))\n }\n}\n\nimpl IntoTransport for TokioChildProcess {\n fn into_transport(self) -> impl Transport + 'static {\n IntoTransport::::into_transport(\n self.split(),\n )\n }\n}\n\npub trait ConfigureCommandExt {\n fn configure(self, f: impl FnOnce(&mut Self)) -> Self;\n}\n\nimpl ConfigureCommandExt for tokio::process::Command {\n fn configure(mut self, f: impl FnOnce(&mut Self)) -> Self {\n f(&mut self);\n self\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/auth/sse_client.rs", "use http::Uri;\n\nuse crate::transport::{\n auth::AuthClient,\n sse_client::{SseClient, SseTransportError},\n};\nimpl SseClient for AuthClient\nwhere\n C: SseClient,\n{\n type Error = SseTransportError;\n\n async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n mut auth_token: Option,\n ) -> Result<(), SseTransportError> {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .post_message(uri, message, auth_token)\n .await\n .map_err(SseTransportError::Client)\n }\n\n async fn get_stream(\n &self,\n uri: Uri,\n last_event_id: Option,\n mut auth_token: Option,\n ) -> Result<\n crate::transport::common::client_side_sse::BoxedSseResponse,\n SseTransportError,\n > {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n 
self.http_client\n .get_stream(uri, last_event_id, auth_token)\n .await\n .map_err(SseTransportError::Client)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/capabilities.rs", "use std::{collections::BTreeMap, marker::PhantomData};\n\nuse paste::paste;\nuse serde::{Deserialize, Serialize};\n\nuse super::JsonObject;\npub type ExperimentalCapabilities = BTreeMap;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptsCapability {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub list_changed: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ResourcesCapability {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub subscribe: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub list_changed: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ToolsCapability {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub list_changed: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RootsCapabilities {\n pub list_changed: Option,\n}\n\n///\n/// # Builder\n/// ```rust\n/// # use rmcp::model::ClientCapabilities;\n/// let cap = ClientCapabilities::builder()\n/// .enable_experimental()\n/// .enable_roots()\n/// .enable_roots_list_changed()\n/// .build();\n/// ```\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ClientCapabilities {\n #[serde(skip_serializing_if = 
\"Option::is_none\")]\n pub experimental: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub roots: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub sampling: Option,\n}\n\n///\n/// ## Builder\n/// ```rust\n/// # use rmcp::model::ServerCapabilities;\n/// let cap = ServerCapabilities::builder()\n/// .enable_logging()\n/// .enable_experimental()\n/// .enable_prompts()\n/// .enable_resources()\n/// .enable_tools()\n/// .enable_tool_list_changed()\n/// .build();\n/// ```\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ServerCapabilities {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub experimental: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub logging: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub completions: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub prompts: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub resources: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub tools: Option,\n}\n\nmacro_rules! builder {\n ($Target: ident {$($f: ident: $T: ty),* $(,)?}) => {\n paste! 
{\n #[derive(Default, Clone, Copy, Debug)]\n pub struct [<$Target BuilderState>]<\n $(const [<$f:upper>]: bool = false,)*\n >;\n #[derive(Debug, Default)]\n pub struct [<$Target Builder>]]> {\n $(pub $f: Option<$T>,)*\n pub state: PhantomData\n }\n impl $Target {\n #[doc = \"Create a new [`\" $Target \"`] builder.\"]\n pub fn builder() -> [<$Target Builder>] {\n <[<$Target Builder>]>::default()\n }\n }\n impl [<$Target Builder>] {\n pub fn build(self) -> $Target {\n $Target {\n $( $f: self.$f, )*\n }\n }\n }\n impl From<[<$Target Builder>]> for $Target {\n fn from(builder: [<$Target Builder>]) -> Self {\n builder.build()\n }\n }\n }\n builder!($Target @toggle $($f: $T,) *);\n\n };\n ($Target: ident @toggle $f0: ident: $T0: ty, $($f: ident: $T: ty,)*) => {\n builder!($Target @toggle [][$f0: $T0][$($f: $T,)*]);\n };\n ($Target: ident @toggle [$($ff: ident: $Tf: ty,)*][$fn: ident: $TN: ty][$fn_1: ident: $Tn_1: ty, $($ft: ident: $Tt: ty,)*]) => {\n builder!($Target @impl_toggle [$($ff: $Tf,)*][$fn: $TN][$fn_1: $Tn_1, $($ft:$Tt,)*]);\n builder!($Target @toggle [$($ff: $Tf,)* $fn: $TN,][$fn_1: $Tn_1][$($ft:$Tt,)*]);\n };\n ($Target: ident @toggle [$($ff: ident: $Tf: ty,)*][$fn: ident: $TN: ty][]) => {\n builder!($Target @impl_toggle [$($ff: $Tf,)*][$fn: $TN][]);\n };\n ($Target: ident @impl_toggle [$($ff: ident: $Tf: ty,)*][$fn: ident: $TN: ty][$($ft: ident: $Tt: ty,)*]) => {\n paste! 
{\n impl<\n $(const [<$ff:upper>]: bool,)*\n $(const [<$ft:upper>]: bool,)*\n > [<$Target Builder>]<[<$Target BuilderState>]<\n $([<$ff:upper>],)*\n false,\n $([<$ft:upper>],)*\n >> {\n pub fn [](self) -> [<$Target Builder>]<[<$Target BuilderState>]<\n $([<$ff:upper>],)*\n true,\n $([<$ft:upper>],)*\n >> {\n [<$Target Builder>] {\n $( $ff: self.$ff, )*\n $fn: Some($TN::default()),\n $( $ft: self.$ft, )*\n state: PhantomData\n }\n }\n pub fn [](self, $fn: $TN) -> [<$Target Builder>]<[<$Target BuilderState>]<\n $([<$ff:upper>],)*\n true,\n $([<$ft:upper>],)*\n >> {\n [<$Target Builder>] {\n $( $ff: self.$ff, )*\n $fn: Some($fn),\n $( $ft: self.$ft, )*\n state: PhantomData\n }\n }\n }\n // do we really need to disable some thing in builder?\n // impl<\n // $(const [<$ff:upper>]: bool,)*\n // $(const [<$ft:upper>]: bool,)*\n // > [<$Target Builder>]<[<$Target BuilderState>]<\n // $([<$ff:upper>],)*\n // true,\n // $([<$ft:upper>],)*\n // >> {\n // pub fn [](self) -> [<$Target Builder>]<[<$Target BuilderState>]<\n // $([<$ff:upper>],)*\n // false,\n // $([<$ft:upper>],)*\n // >> {\n // [<$Target Builder>] {\n // $( $ff: self.$ff, )*\n // $fn: None,\n // $( $ft: self.$ft, )*\n // state: PhantomData\n // }\n // }\n // }\n }\n }\n}\n\nbuilder! 
{\n ServerCapabilities {\n experimental: ExperimentalCapabilities,\n logging: JsonObject,\n completions: JsonObject,\n prompts: PromptsCapability,\n resources: ResourcesCapability,\n tools: ToolsCapability\n }\n}\n\nimpl\n ServerCapabilitiesBuilder>\n{\n pub fn enable_tool_list_changed(mut self) -> Self {\n if let Some(c) = self.tools.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n}\n\nimpl\n ServerCapabilitiesBuilder>\n{\n pub fn enable_prompts_list_changed(mut self) -> Self {\n if let Some(c) = self.prompts.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n}\n\nimpl\n ServerCapabilitiesBuilder>\n{\n pub fn enable_resources_list_changed(mut self) -> Self {\n if let Some(c) = self.resources.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n\n pub fn enable_resources_subscribe(mut self) -> Self {\n if let Some(c) = self.resources.as_mut() {\n c.subscribe = Some(true);\n }\n self\n }\n}\n\nbuilder! {\n ClientCapabilities{\n experimental: ExperimentalCapabilities,\n roots: RootsCapabilities,\n sampling: JsonObject,\n }\n}\n\nimpl\n ClientCapabilitiesBuilder>\n{\n pub fn enable_roots_list_changed(mut self) -> Self {\n if let Some(c) = self.roots.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n}\n\n#[cfg(test)]\nmod test {\n use super::*;\n #[test]\n fn test_builder() {\n let builder = ::default()\n .enable_logging()\n .enable_experimental()\n .enable_prompts()\n .enable_resources()\n .enable_tools()\n .enable_tool_list_changed();\n assert_eq!(builder.logging, Some(JsonObject::default()));\n assert_eq!(builder.prompts, Some(PromptsCapability::default()));\n assert_eq!(builder.resources, Some(ResourcesCapability::default()));\n assert_eq!(\n builder.tools,\n Some(ToolsCapability {\n list_changed: Some(true),\n })\n );\n assert_eq!(\n builder.experimental,\n Some(ExperimentalCapabilities::default())\n );\n let client_builder = ::default()\n .enable_experimental()\n .enable_roots()\n .enable_roots_list_changed()\n .enable_sampling();\n 
assert_eq!(\n client_builder.experimental,\n Some(ExperimentalCapabilities::default())\n );\n assert_eq!(\n client_builder.roots,\n Some(RootsCapabilities {\n list_changed: Some(true),\n })\n );\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/lib.rs", "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, allow(unused_attributes))]\n//! The official Rust SDK for the Model Context Protocol (MCP).\n//!\n//! The MCP is a protocol that allows AI assistants to communicate with other\n//! services. `rmcp` is the official Rust implementation of this protocol.\n//!\n//! There are two ways in which the library can be used, namely to build a\n//! server or to build a client.\n//!\n//! ## Server\n//!\n//! A server is a service that exposes capabilities. For example, a common\n//! use-case is for the server to make multiple tools available to clients such\n//! as Claude Desktop or the Cursor IDE.\n//!\n//! For example, to implement a server that has a tool that can count, you would\n//! make an object for that tool and add an implementation with the `#[tool_router]` macro:\n//!\n//! ```rust\n//! use std::sync::Arc;\n//! use rmcp::{ErrorData as McpError, model::*, tool, tool_router, handler::server::tool::ToolRouter};\n//! use tokio::sync::Mutex;\n//!\n//! #[derive(Clone)]\n//! pub struct Counter {\n//! counter: Arc>,\n//! tool_router: ToolRouter,\n//! }\n//!\n//! #[tool_router]\n//! impl Counter {\n//! fn new() -> Self {\n//! Self {\n//! counter: Arc::new(Mutex::new(0)),\n//! tool_router: Self::tool_router(),\n//! }\n//! }\n//!\n//! #[tool(description = \"Increment the counter by 1\")]\n//! async fn increment(&self) -> Result {\n//! let mut counter = self.counter.lock().await;\n//! *counter += 1;\n//! Ok(CallToolResult::success(vec![Content::text(\n//! counter.to_string(),\n//! )]))\n//! }\n//! }\n//! ```\n//!\n//! Next also implement [ServerHandler] for `Counter` and start the server inside\n//! `main` by calling `Counter::new().serve(...)`. 
See the examples directory in the repository for more information.\n//!\n//! ## Client\n//!\n//! A client can be used to interact with a server. Clients can be used to get a\n//! list of the available tools and to call them. For example, we can `uv` to\n//! start a MCP server in Python and then list the tools and call `git status`\n//! as follows:\n//!\n//! ```rust\n//! use anyhow::Result;\n//! use rmcp::{model::CallToolRequestParam, service::ServiceExt, transport::{TokioChildProcess, ConfigureCommandExt}};\n//! use tokio::process::Command;\n//!\n//! async fn client() -> Result<()> {\n//! let service = ().serve(TokioChildProcess::new(Command::new(\"uvx\").configure(|cmd| {\n//! cmd.arg(\"mcp-server-git\");\n//! }))?).await?;\n//!\n//! // Initialize\n//! let server_info = service.peer_info();\n//! println!(\"Connected to server: {server_info:#?}\");\n//!\n//! // List tools\n//! let tools = service.list_tools(Default::default()).await?;\n//! println!(\"Available tools: {tools:#?}\");\n//!\n//! // Call tool 'git_status' with arguments = {\"repo_path\": \".\"}\n//! let tool_result = service\n//! .call_tool(CallToolRequestParam {\n//! name: \"git_status\".into(),\n//! arguments: serde_json::json!({ \"repo_path\": \".\" }).as_object().cloned(),\n//! })\n//! .await?;\n//! println!(\"Tool result: {tool_result:#?}\");\n//!\n//! service.cancel().await?;\n//! Ok(())\n//! }\n//! 
```\nmod error;\n#[allow(deprecated)]\npub use error::{Error, ErrorData, RmcpError};\n\n/// Basic data types in MCP specification\npub mod model;\n#[cfg(any(feature = \"client\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(any(feature = \"client\", feature = \"server\"))))]\npub mod service;\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub use handler::client::ClientHandler;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub use handler::server::ServerHandler;\n#[cfg(any(feature = \"client\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(any(feature = \"client\", feature = \"server\"))))]\npub use service::{Peer, Service, ServiceError, ServiceExt};\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub use service::{RoleClient, serve_client};\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub use service::{RoleServer, serve_server};\n\npub mod handler;\npub mod transport;\n\n// re-export\n#[cfg(all(feature = \"macros\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(all(feature = \"macros\", feature = \"server\"))))]\npub use paste::paste;\n#[cfg(all(feature = \"macros\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(all(feature = \"macros\", feature = \"server\"))))]\npub use rmcp_macros::*;\n#[cfg(all(feature = \"macros\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(all(feature = \"macros\", feature = \"server\"))))]\npub use schemars;\n#[cfg(feature = \"macros\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"macros\")))]\npub use serde;\n#[cfg(feature = \"macros\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"macros\")))]\npub use serde_json;\n"], ["/rust-sdk/crates/rmcp/src/handler/server/router/tool.rs", "use std::{borrow::Cow, sync::Arc};\n\nuse futures::{FutureExt, future::BoxFuture};\nuse schemars::JsonSchema;\n\nuse crate::{\n handler::server::tool::{\n CallToolHandler, DynCallToolHandler, 
ToolCallContext, schema_for_type,\n },\n model::{CallToolResult, Tool, ToolAnnotations},\n};\n\npub struct ToolRoute {\n #[allow(clippy::type_complexity)]\n pub call: Arc>,\n pub attr: crate::model::Tool,\n}\n\nimpl std::fmt::Debug for ToolRoute {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"ToolRoute\")\n .field(\"name\", &self.attr.name)\n .field(\"description\", &self.attr.description)\n .field(\"input_schema\", &self.attr.input_schema)\n .finish()\n }\n}\n\nimpl Clone for ToolRoute {\n fn clone(&self) -> Self {\n Self {\n call: self.call.clone(),\n attr: self.attr.clone(),\n }\n }\n}\n\nimpl ToolRoute {\n pub fn new(attr: impl Into, call: C) -> Self\n where\n C: CallToolHandler + Send + Sync + Clone + 'static,\n {\n Self {\n call: Arc::new(move |context: ToolCallContext| {\n let call = call.clone();\n context.invoke(call).boxed()\n }),\n attr: attr.into(),\n }\n }\n pub fn new_dyn(attr: impl Into, call: C) -> Self\n where\n C: for<'a> Fn(\n ToolCallContext<'a, S>,\n ) -> BoxFuture<'a, Result>\n + Send\n + Sync\n + 'static,\n {\n Self {\n call: Arc::new(call),\n attr: attr.into(),\n }\n }\n pub fn name(&self) -> &str {\n &self.attr.name\n }\n}\n\npub trait IntoToolRoute {\n fn into_tool_route(self) -> ToolRoute;\n}\n\nimpl IntoToolRoute for (T, C)\nwhere\n S: Send + Sync + 'static,\n C: CallToolHandler + Send + Sync + Clone + 'static,\n T: Into,\n{\n fn into_tool_route(self) -> ToolRoute {\n ToolRoute::new(self.0.into(), self.1)\n }\n}\n\nimpl IntoToolRoute for ToolRoute\nwhere\n S: Send + Sync + 'static,\n{\n fn into_tool_route(self) -> ToolRoute {\n self\n }\n}\n\npub struct ToolAttrGenerateFunctionAdapter;\nimpl IntoToolRoute for F\nwhere\n S: Send + Sync + 'static,\n F: Fn() -> ToolRoute,\n{\n fn into_tool_route(self) -> ToolRoute {\n (self)()\n }\n}\n\npub trait CallToolHandlerExt: Sized\nwhere\n Self: CallToolHandler + Send + Sync + Clone + 'static,\n{\n fn name(self, name: impl Into>) -> 
WithToolAttr;\n}\n\nimpl CallToolHandlerExt for C\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n{\n fn name(self, name: impl Into>) -> WithToolAttr {\n WithToolAttr {\n attr: Tool::new(\n name.into(),\n \"\",\n schema_for_type::(),\n ),\n call: self,\n _marker: std::marker::PhantomData,\n }\n }\n}\n\npub struct WithToolAttr\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n{\n pub attr: crate::model::Tool,\n pub call: C,\n pub _marker: std::marker::PhantomData,\n}\n\nimpl IntoToolRoute for WithToolAttr\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n S: Send + Sync + 'static,\n{\n fn into_tool_route(self) -> ToolRoute {\n ToolRoute::new(self.attr, self.call)\n }\n}\n\nimpl WithToolAttr\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n{\n pub fn description(mut self, description: impl Into>) -> Self {\n self.attr.description = Some(description.into());\n self\n }\n pub fn parameters(mut self) -> Self {\n self.attr.input_schema = schema_for_type::().into();\n self\n }\n pub fn parameters_value(mut self, schema: serde_json::Value) -> Self {\n self.attr.input_schema = crate::model::object(schema).into();\n self\n }\n pub fn annotation(mut self, annotation: impl Into) -> Self {\n self.attr.annotations = Some(annotation.into());\n self\n }\n}\n#[derive(Debug)]\npub struct ToolRouter {\n #[allow(clippy::type_complexity)]\n pub map: std::collections::HashMap, ToolRoute>,\n\n pub transparent_when_not_found: bool,\n}\n\nimpl Default for ToolRouter {\n fn default() -> Self {\n Self {\n map: std::collections::HashMap::new(),\n transparent_when_not_found: false,\n }\n }\n}\nimpl Clone for ToolRouter {\n fn clone(&self) -> Self {\n Self {\n map: self.map.clone(),\n transparent_when_not_found: self.transparent_when_not_found,\n }\n }\n}\n\nimpl IntoIterator for ToolRouter {\n type Item = ToolRoute;\n type IntoIter = std::collections::hash_map::IntoValues, ToolRoute>;\n\n fn into_iter(self) -> Self::IntoIter {\n 
self.map.into_values()\n }\n}\n\nimpl ToolRouter\nwhere\n S: Send + Sync + 'static,\n{\n pub fn new() -> Self {\n Self {\n map: std::collections::HashMap::new(),\n transparent_when_not_found: false,\n }\n }\n pub fn with_route(mut self, route: R) -> Self\n where\n R: IntoToolRoute,\n {\n self.add_route(route.into_tool_route());\n self\n }\n\n pub fn add_route(&mut self, item: ToolRoute) {\n self.map.insert(item.attr.name.clone(), item);\n }\n\n pub fn merge(&mut self, other: ToolRouter) {\n for item in other.map.into_values() {\n self.add_route(item);\n }\n }\n\n pub fn remove_route(&mut self, name: &str) {\n self.map.remove(name);\n }\n pub fn has_route(&self, name: &str) -> bool {\n self.map.contains_key(name)\n }\n pub async fn call(\n &self,\n context: ToolCallContext<'_, S>,\n ) -> Result {\n let item = self\n .map\n .get(context.name())\n .ok_or_else(|| crate::ErrorData::invalid_params(\"tool not found\", None))?;\n (item.call)(context).await\n }\n\n pub fn list_all(&self) -> Vec {\n self.map.values().map(|item| item.attr.clone()).collect()\n }\n}\n\nimpl std::ops::Add> for ToolRouter\nwhere\n S: Send + Sync + 'static,\n{\n type Output = Self;\n\n fn add(mut self, other: ToolRouter) -> Self::Output {\n self.merge(other);\n self\n }\n}\n\nimpl std::ops::AddAssign> for ToolRouter\nwhere\n S: Send + Sync + 'static,\n{\n fn add_assign(&mut self, other: ToolRouter) {\n self.merge(other);\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/router.rs", "use std::sync::Arc;\n\nuse tool::{IntoToolRoute, ToolRoute};\n\nuse super::ServerHandler;\nuse crate::{\n RoleServer, Service,\n model::{ClientRequest, ListToolsResult, ServerResult},\n service::NotificationContext,\n};\n\npub mod tool;\n\npub struct Router {\n pub tool_router: tool::ToolRouter,\n pub service: Arc,\n}\n\nimpl Router\nwhere\n S: ServerHandler,\n{\n pub fn new(service: S) -> Self {\n Self {\n tool_router: tool::ToolRouter::new(),\n service: Arc::new(service),\n }\n }\n\n pub fn with_tool(mut self, 
route: R) -> Self\n where\n R: IntoToolRoute,\n {\n self.tool_router.add_route(route.into_tool_route());\n self\n }\n\n pub fn with_tools(mut self, routes: impl IntoIterator>) -> Self {\n for route in routes {\n self.tool_router.add_route(route);\n }\n self\n }\n}\n\nimpl Service for Router\nwhere\n S: ServerHandler,\n{\n async fn handle_notification(\n &self,\n notification: ::PeerNot,\n context: NotificationContext,\n ) -> Result<(), crate::ErrorData> {\n self.service\n .handle_notification(notification, context)\n .await\n }\n async fn handle_request(\n &self,\n request: ::PeerReq,\n context: crate::service::RequestContext,\n ) -> Result<::Resp, crate::ErrorData> {\n match request {\n ClientRequest::CallToolRequest(request) => {\n if self.tool_router.has_route(request.params.name.as_ref())\n || !self.tool_router.transparent_when_not_found\n {\n let tool_call_context = crate::handler::server::tool::ToolCallContext::new(\n self.service.as_ref(),\n request.params,\n context,\n );\n let result = self.tool_router.call(tool_call_context).await?;\n Ok(ServerResult::CallToolResult(result))\n } else {\n self.service\n .handle_request(ClientRequest::CallToolRequest(request), context)\n .await\n }\n }\n ClientRequest::ListToolsRequest(_) => {\n let tools = self.tool_router.list_all();\n Ok(ServerResult::ListToolsResult(ListToolsResult {\n tools,\n next_cursor: None,\n }))\n }\n rest => self.service.handle_request(rest, context).await,\n }\n }\n\n fn get_info(&self) -> ::Info {\n self.service.get_info()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/client.rs", "pub mod progress;\nuse crate::{\n error::ErrorData as McpError,\n model::*,\n service::{NotificationContext, RequestContext, RoleClient, Service, ServiceRole},\n};\n\nimpl Service for H {\n async fn handle_request(\n &self,\n request: ::PeerReq,\n context: RequestContext,\n ) -> Result<::Resp, McpError> {\n match request {\n ServerRequest::PingRequest(_) => self.ping(context).await.map(ClientResult::empty),\n 
ServerRequest::CreateMessageRequest(request) => self\n .create_message(request.params, context)\n .await\n .map(ClientResult::CreateMessageResult),\n ServerRequest::ListRootsRequest(_) => self\n .list_roots(context)\n .await\n .map(ClientResult::ListRootsResult),\n }\n }\n\n async fn handle_notification(\n &self,\n notification: ::PeerNot,\n context: NotificationContext,\n ) -> Result<(), McpError> {\n match notification {\n ServerNotification::CancelledNotification(notification) => {\n self.on_cancelled(notification.params, context).await\n }\n ServerNotification::ProgressNotification(notification) => {\n self.on_progress(notification.params, context).await\n }\n ServerNotification::LoggingMessageNotification(notification) => {\n self.on_logging_message(notification.params, context).await\n }\n ServerNotification::ResourceUpdatedNotification(notification) => {\n self.on_resource_updated(notification.params, context).await\n }\n ServerNotification::ResourceListChangedNotification(_notification_no_param) => {\n self.on_resource_list_changed(context).await\n }\n ServerNotification::ToolListChangedNotification(_notification_no_param) => {\n self.on_tool_list_changed(context).await\n }\n ServerNotification::PromptListChangedNotification(_notification_no_param) => {\n self.on_prompt_list_changed(context).await\n }\n };\n Ok(())\n }\n\n fn get_info(&self) -> ::Info {\n self.get_info()\n }\n}\n\n#[allow(unused_variables)]\npub trait ClientHandler: Sized + Send + Sync + 'static {\n fn ping(\n &self,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(()))\n }\n\n fn create_message(\n &self,\n params: CreateMessageRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(\n McpError::method_not_found::(),\n ))\n }\n\n fn list_roots(\n &self,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListRootsResult::default()))\n }\n\n fn on_cancelled(\n &self,\n params: 
CancelledNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_progress(\n &self,\n params: ProgressNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_logging_message(\n &self,\n params: LoggingMessageNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_resource_updated(\n &self,\n params: ResourceUpdatedNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_resource_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_tool_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_prompt_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n\n fn get_info(&self) -> ClientInfo {\n ClientInfo::default()\n }\n}\n\n/// Do nothing, with default client info.\nimpl ClientHandler for () {}\n\n/// Do nothing, with a specific client info.\nimpl ClientHandler for ClientInfo {\n fn get_info(&self) -> ClientInfo {\n self.clone()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server.rs", "use crate::{\n error::ErrorData as McpError,\n model::*,\n service::{NotificationContext, RequestContext, RoleServer, Service, ServiceRole},\n};\n\nmod resource;\npub mod router;\npub mod tool;\npub mod wrapper;\nimpl Service for H {\n async fn handle_request(\n &self,\n request: ::PeerReq,\n context: RequestContext,\n ) -> Result<::Resp, McpError> {\n match request {\n ClientRequest::InitializeRequest(request) => self\n .initialize(request.params, context)\n .await\n .map(ServerResult::InitializeResult),\n ClientRequest::PingRequest(_request) => {\n self.ping(context).await.map(ServerResult::empty)\n }\n 
ClientRequest::CompleteRequest(request) => self\n .complete(request.params, context)\n .await\n .map(ServerResult::CompleteResult),\n ClientRequest::SetLevelRequest(request) => self\n .set_level(request.params, context)\n .await\n .map(ServerResult::empty),\n ClientRequest::GetPromptRequest(request) => self\n .get_prompt(request.params, context)\n .await\n .map(ServerResult::GetPromptResult),\n ClientRequest::ListPromptsRequest(request) => self\n .list_prompts(request.params, context)\n .await\n .map(ServerResult::ListPromptsResult),\n ClientRequest::ListResourcesRequest(request) => self\n .list_resources(request.params, context)\n .await\n .map(ServerResult::ListResourcesResult),\n ClientRequest::ListResourceTemplatesRequest(request) => self\n .list_resource_templates(request.params, context)\n .await\n .map(ServerResult::ListResourceTemplatesResult),\n ClientRequest::ReadResourceRequest(request) => self\n .read_resource(request.params, context)\n .await\n .map(ServerResult::ReadResourceResult),\n ClientRequest::SubscribeRequest(request) => self\n .subscribe(request.params, context)\n .await\n .map(ServerResult::empty),\n ClientRequest::UnsubscribeRequest(request) => self\n .unsubscribe(request.params, context)\n .await\n .map(ServerResult::empty),\n ClientRequest::CallToolRequest(request) => self\n .call_tool(request.params, context)\n .await\n .map(ServerResult::CallToolResult),\n ClientRequest::ListToolsRequest(request) => self\n .list_tools(request.params, context)\n .await\n .map(ServerResult::ListToolsResult),\n }\n }\n\n async fn handle_notification(\n &self,\n notification: ::PeerNot,\n context: NotificationContext,\n ) -> Result<(), McpError> {\n match notification {\n ClientNotification::CancelledNotification(notification) => {\n self.on_cancelled(notification.params, context).await\n }\n ClientNotification::ProgressNotification(notification) => {\n self.on_progress(notification.params, context).await\n }\n 
ClientNotification::InitializedNotification(_notification) => {\n self.on_initialized(context).await\n }\n ClientNotification::RootsListChangedNotification(_notification) => {\n self.on_roots_list_changed(context).await\n }\n };\n Ok(())\n }\n\n fn get_info(&self) -> ::Info {\n self.get_info()\n }\n}\n\n#[allow(unused_variables)]\npub trait ServerHandler: Sized + Send + Sync + 'static {\n fn ping(\n &self,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(()))\n }\n // handle requests\n fn initialize(\n &self,\n request: InitializeRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n if context.peer.peer_info().is_none() {\n context.peer.set_peer_info(request);\n }\n std::future::ready(Ok(self.get_info()))\n }\n fn complete(\n &self,\n request: CompleteRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn set_level(\n &self,\n request: SetLevelRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn get_prompt(\n &self,\n request: GetPromptRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn list_prompts(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListPromptsResult::default()))\n }\n fn list_resources(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListResourcesResult::default()))\n }\n fn list_resource_templates(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListResourceTemplatesResult::default()))\n }\n fn read_resource(\n &self,\n request: ReadResourceRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n 
std::future::ready(Err(\n McpError::method_not_found::(),\n ))\n }\n fn subscribe(\n &self,\n request: SubscribeRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn unsubscribe(\n &self,\n request: UnsubscribeRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn call_tool(\n &self,\n request: CallToolRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn list_tools(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListToolsResult::default()))\n }\n\n fn on_cancelled(\n &self,\n notification: CancelledNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_progress(\n &self,\n notification: ProgressNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_initialized(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n tracing::info!(\"client initialized\");\n std::future::ready(())\n }\n fn on_roots_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n\n fn get_info(&self) -> ServerInfo {\n ServerInfo::default()\n }\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/tool_router.rs", "//! ```ignore\n//! #[rmcp::tool_router(router)]\n//! impl Handler {\n//!\n//! }\n//! 
```\n//!\n\nuse darling::{FromMeta, ast::NestedMeta};\nuse proc_macro2::TokenStream;\nuse quote::{ToTokens, format_ident, quote};\nuse syn::{Ident, ImplItem, ItemImpl, Visibility};\n\n#[derive(FromMeta)]\n#[darling(default)]\npub struct ToolRouterAttribute {\n pub router: Ident,\n pub vis: Option,\n}\n\nimpl Default for ToolRouterAttribute {\n fn default() -> Self {\n Self {\n router: format_ident!(\"tool_router\"),\n vis: None,\n }\n }\n}\n\npub fn tool_router(attr: TokenStream, input: TokenStream) -> syn::Result {\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n let ToolRouterAttribute { router, vis } = ToolRouterAttribute::from_list(&attr_args)?;\n let mut item_impl = syn::parse2::(input.clone())?;\n // find all function marked with `#[rmcp::tool]`\n let tool_attr_fns: Vec<_> = item_impl\n .items\n .iter()\n .filter_map(|item| {\n if let syn::ImplItem::Fn(fn_item) = item {\n fn_item\n .attrs\n .iter()\n .any(|attr| {\n attr.path()\n .segments\n .last()\n .is_some_and(|seg| seg.ident == \"tool\")\n })\n .then_some(&fn_item.sig.ident)\n } else {\n None\n }\n })\n .collect();\n let mut routers = vec![];\n for handler in tool_attr_fns {\n let tool_attr_fn_ident = format_ident!(\"{handler}_tool_attr\");\n routers.push(quote! {\n .with_route((Self::#tool_attr_fn_ident(), Self::#handler))\n })\n }\n let router_fn = syn::parse2::(quote! {\n #vis fn #router() -> rmcp::handler::server::router::tool::ToolRouter {\n rmcp::handler::server::router::tool::ToolRouter::::new()\n #(#routers)*\n }\n })?;\n item_impl.items.push(router_fn);\n Ok(item_impl.into_token_stream())\n}\n\n#[cfg(test)]\nmod test {\n use super::*;\n #[test]\n fn test_router_attr() -> Result<(), Box> {\n let attr = quote! 
{\n router = test_router,\n };\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n let ToolRouterAttribute { router, vis } = ToolRouterAttribute::from_list(&attr_args)?;\n println!(\"router: {}\", router);\n if let Some(vis) = vis {\n println!(\"visibility: {}\", vis.to_token_stream());\n } else {\n println!(\"visibility: None\");\n }\n Ok(())\n }\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/lib.rs", "#[allow(unused_imports)]\nuse proc_macro::TokenStream;\n\nmod tool;\nmod tool_handler;\nmod tool_router;\n/// # tool\n///\n/// This macro is used to mark a function as a tool handler.\n///\n/// This will generate a function that return the attribute of this tool, with type `rmcp::model::Tool`.\n///\n/// ## Usage\n///\n/// | feied | type | usage |\n/// | :- | :- | :- |\n/// | `name` | `String` | The name of the tool. If not provided, it defaults to the function name. |\n/// | `description` | `String` | A description of the tool. The document of this function will be used. |\n/// | `input_schema` | `Expr` | A JSON Schema object defining the expected parameters for the tool. If not provide, if will use the json schema of its argument with type `Parameters` |\n/// | `annotations` | `ToolAnnotationsAttribute` | Additional tool information. Defaults to `None`. 
|\n///\n/// ## Example\n///\n/// ```rust,ignore\n/// #[tool(name = \"my_tool\", description = \"This is my tool\", annotations(title = \"我的工具\", read_only_hint = true))]\n/// pub async fn my_tool(param: Parameters) {\n/// // handling tool request\n/// }\n/// ```\n#[proc_macro_attribute]\npub fn tool(attr: TokenStream, input: TokenStream) -> TokenStream {\n tool::tool(attr.into(), input.into())\n .unwrap_or_else(|err| err.to_compile_error())\n .into()\n}\n\n/// # tool_router\n///\n/// This macro is used to generate a tool router based on functions marked with `#[rmcp::tool]` in an implementation block.\n///\n/// It creates a function that returns a `ToolRouter` instance.\n///\n/// In most case, you need to add a field for handler to store the router information and initialize it when creating handler, or store it with a static variable.\n/// ## Usage\n///\n/// | feied | type | usage |\n/// | :- | :- | :- |\n/// | `router` | `Ident` | The name of the router function to be generated. Defaults to `tool_router`. |\n/// | `vis` | `Visibility` | The visibility of the generated router function. Defaults to empty. 
|\n///\n/// ## Example\n///\n/// ```rust,ignore\n/// #[tool_router]\n/// impl MyToolHandler {\n/// #[tool]\n/// pub fn my_tool() {\n/// \n/// }\n///\n/// pub fn new() -> Self {\n/// Self {\n/// // the default name of tool router will be `tool_router`\n/// tool_router: Self::tool_router(),\n/// }\n/// }\n/// }\n/// ```\n///\n/// Or specify the visibility and router name, which would be helpful when you want to combine multiple routers into one:\n///\n/// ```rust,ignore\n/// mod a {\n/// #[tool_router(router = tool_router_a, vis = pub)]\n/// impl MyToolHandler {\n/// #[tool]\n/// fn my_tool_a() {\n/// \n/// }\n/// }\n/// }\n///\n/// mod b {\n/// #[tool_router(router = tool_router_b, vis = pub)]\n/// impl MyToolHandler {\n/// #[tool]\n/// fn my_tool_b() {\n/// \n/// }\n/// }\n/// }\n///\n/// impl MyToolHandler {\n/// fn new() -> Self {\n/// Self {\n/// tool_router: self::tool_router_a() + self::tool_router_b(),\n/// }\n/// }\n/// }\n/// ```\n#[proc_macro_attribute]\npub fn tool_router(attr: TokenStream, input: TokenStream) -> TokenStream {\n tool_router::tool_router(attr.into(), input.into())\n .unwrap_or_else(|err| err.to_compile_error())\n .into()\n}\n\n/// # tool_handler\n///\n/// This macro will generate the handler for `tool_call` and `list_tools` methods in the implementation block, by using an existing `ToolRouter` instance.\n///\n/// ## Usage\n///\n/// | field | type | usage |\n/// | :- | :- | :- |\n/// | `router` | `Expr` | The expression to access the `ToolRouter` instance. Defaults to `self.tool_router`. 
|\n/// ## Example\n/// ```rust,ignore\n/// #[tool_handler]\n/// impl ServerHandler for MyToolHandler {\n/// // ...implement other handler\n/// }\n/// ```\n///\n/// or using a custom router expression:\n/// ```rust,ignore\n/// #[tool_handler(router = self.get_router().await)]\n/// impl ServerHandler for MyToolHandler {\n/// // ...implement other handler\n/// }\n/// ```\n///\n/// ## Explain\n///\n/// This macro will be expended to something like this:\n/// ```rust,ignore\n/// impl ServerHandler for MyToolHandler {\n/// async fn call_tool(\n/// &self,\n/// request: CallToolRequestParam,\n/// context: RequestContext,\n/// ) -> Result {\n/// let tcc = ToolCallContext::new(self, request, context);\n/// self.tool_router.call(tcc).await\n/// }\n///\n/// async fn list_tools(\n/// &self,\n/// _request: Option,\n/// _context: RequestContext,\n/// ) -> Result {\n/// let items = self.tool_router.list_all();\n/// Ok(ListToolsResult::with_all_items(items))\n/// }\n/// }\n/// ```\n#[proc_macro_attribute]\npub fn tool_handler(attr: TokenStream, input: TokenStream) -> TokenStream {\n tool_handler::tool_handler(attr.into(), input.into())\n .unwrap_or_else(|err| err.to_compile_error())\n .into()\n}\n"], ["/rust-sdk/crates/rmcp/src/model/annotated.rs", "use std::ops::{Deref, DerefMut};\n\nuse chrono::{DateTime, Utc};\nuse serde::{Deserialize, Serialize};\n\nuse super::{\n RawAudioContent, RawContent, RawEmbeddedResource, RawImageContent, RawResource,\n RawResourceTemplate, RawTextContent, Role,\n};\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Annotations {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub audience: Option>,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub priority: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub timestamp: Option>,\n}\n\nimpl Annotations {\n /// Creates a new Annotations 
instance specifically for resources\n /// optional priority, and a timestamp (defaults to now if None)\n pub fn for_resource(priority: f32, timestamp: DateTime) -> Self {\n assert!(\n (0.0..=1.0).contains(&priority),\n \"Priority {priority} must be between 0.0 and 1.0\"\n );\n Annotations {\n priority: Some(priority),\n timestamp: Some(timestamp),\n audience: None,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Annotated {\n #[serde(flatten)]\n pub raw: T,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub annotations: Option,\n}\n\nimpl Deref for Annotated {\n type Target = T;\n fn deref(&self) -> &Self::Target {\n &self.raw\n }\n}\n\nimpl DerefMut for Annotated {\n fn deref_mut(&mut self) -> &mut Self::Target {\n &mut self.raw\n }\n}\n\nimpl Annotated {\n pub fn new(raw: T, annotations: Option) -> Self {\n Self { raw, annotations }\n }\n pub fn remove_annotation(&mut self) -> Option {\n self.annotations.take()\n }\n pub fn audience(&self) -> Option<&Vec> {\n self.annotations.as_ref().and_then(|a| a.audience.as_ref())\n }\n pub fn priority(&self) -> Option {\n self.annotations.as_ref().and_then(|a| a.priority)\n }\n pub fn timestamp(&self) -> Option> {\n self.annotations.as_ref().and_then(|a| a.timestamp)\n }\n pub fn with_audience(self, audience: Vec) -> Annotated\n where\n Self: Sized,\n {\n if let Some(annotations) = self.annotations {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n audience: Some(audience),\n ..annotations\n }),\n }\n } else {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n audience: Some(audience),\n priority: None,\n timestamp: None,\n }),\n }\n }\n }\n pub fn with_priority(self, priority: f32) -> Annotated\n where\n Self: Sized,\n {\n if let Some(annotations) = self.annotations {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n priority: Some(priority),\n ..annotations\n 
}),\n }\n } else {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n priority: Some(priority),\n timestamp: None,\n audience: None,\n }),\n }\n }\n }\n pub fn with_timestamp(self, timestamp: DateTime) -> Annotated\n where\n Self: Sized,\n {\n if let Some(annotations) = self.annotations {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n timestamp: Some(timestamp),\n ..annotations\n }),\n }\n } else {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n timestamp: Some(timestamp),\n priority: None,\n audience: None,\n }),\n }\n }\n }\n pub fn with_timestamp_now(self) -> Annotated\n where\n Self: Sized,\n {\n self.with_timestamp(Utc::now())\n }\n}\n\nmod sealed {\n pub trait Sealed {}\n}\nmacro_rules! annotate {\n ($T: ident) => {\n impl sealed::Sealed for $T {}\n impl AnnotateAble for $T {}\n };\n}\n\nannotate!(RawContent);\nannotate!(RawTextContent);\nannotate!(RawImageContent);\nannotate!(RawAudioContent);\nannotate!(RawEmbeddedResource);\nannotate!(RawResource);\nannotate!(RawResourceTemplate);\n\npub trait AnnotateAble: sealed::Sealed {\n fn optional_annotate(self, annotations: Option) -> Annotated\n where\n Self: Sized,\n {\n Annotated::new(self, annotations)\n }\n fn annotate(self, annotations: Annotations) -> Annotated\n where\n Self: Sized,\n {\n Annotated::new(self, Some(annotations))\n }\n fn no_annotation(self) -> Annotated\n where\n Self: Sized,\n {\n Annotated::new(self, None)\n }\n fn with_audience(self, audience: Vec) -> Annotated\n where\n Self: Sized,\n {\n self.annotate(Annotations {\n audience: Some(audience),\n ..Default::default()\n })\n }\n fn with_priority(self, priority: f32) -> Annotated\n where\n Self: Sized,\n {\n self.annotate(Annotations {\n priority: Some(priority),\n ..Default::default()\n })\n }\n fn with_timestamp(self, timestamp: DateTime) -> Annotated\n where\n Self: Sized,\n {\n self.annotate(Annotations {\n timestamp: Some(timestamp),\n ..Default::default()\n })\n }\n fn 
with_timestamp_now(self) -> Annotated\n where\n Self: Sized,\n {\n self.with_timestamp(Utc::now())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/prompt.rs", "use base64::engine::{Engine, general_purpose::STANDARD as BASE64_STANDARD};\nuse serde::{Deserialize, Serialize};\n\nuse super::{\n AnnotateAble, Annotations, RawEmbeddedResource, RawImageContent,\n content::{EmbeddedResource, ImageContent},\n resource::ResourceContents,\n};\n\n/// A prompt that can be used to generate text from a model\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Prompt {\n /// The name of the prompt\n pub name: String,\n /// Optional description of what the prompt does\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n /// Optional arguments that can be passed to customize the prompt\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub arguments: Option>,\n}\n\nimpl Prompt {\n /// Create a new prompt with the given name, description and arguments\n pub fn new(\n name: N,\n description: Option,\n arguments: Option>,\n ) -> Self\n where\n N: Into,\n D: Into,\n {\n Prompt {\n name: name.into(),\n description: description.map(Into::into),\n arguments,\n }\n }\n}\n\n/// Represents a prompt argument that can be passed to customize the prompt\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptArgument {\n /// The name of the argument\n pub name: String,\n /// A description of what the argument is used for\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n /// Whether this argument is required\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub required: Option,\n}\n\n/// Represents the role of a message sender in a prompt conversation\n#[derive(Debug, Clone, PartialEq, Serialize, 
Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum PromptMessageRole {\n User,\n Assistant,\n}\n\n/// Content types that can be included in prompt messages\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(tag = \"type\", rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum PromptMessageContent {\n /// Plain text content\n Text { text: String },\n /// Image content with base64-encoded data\n Image {\n #[serde(flatten)]\n image: ImageContent,\n },\n /// Embedded server-side resource\n Resource { resource: EmbeddedResource },\n}\n\nimpl PromptMessageContent {\n pub fn text(text: impl Into) -> Self {\n Self::Text { text: text.into() }\n }\n}\n\n/// A message in a prompt conversation\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptMessage {\n /// The role of the message sender\n pub role: PromptMessageRole,\n /// The content of the message\n pub content: PromptMessageContent,\n}\n\nimpl PromptMessage {\n /// Create a new text message with the given role and text content\n pub fn new_text>(role: PromptMessageRole, text: S) -> Self {\n Self {\n role,\n content: PromptMessageContent::Text { text: text.into() },\n }\n }\n #[cfg(feature = \"base64\")]\n pub fn new_image(\n role: PromptMessageRole,\n data: &[u8],\n mime_type: &str,\n annotations: Option,\n ) -> Self {\n let mime_type = mime_type.into();\n\n let base64 = BASE64_STANDARD.encode(data);\n\n Self {\n role,\n content: PromptMessageContent::Image {\n image: RawImageContent {\n data: base64,\n mime_type,\n }\n .optional_annotate(annotations),\n },\n }\n }\n\n /// Create a new resource message\n pub fn new_resource(\n role: PromptMessageRole,\n uri: String,\n mime_type: String,\n text: Option,\n annotations: Option,\n ) -> Self {\n let resource_contents = 
ResourceContents::TextResourceContents {\n uri,\n mime_type: Some(mime_type),\n text: text.unwrap_or_default(),\n };\n\n Self {\n role,\n content: PromptMessageContent::Resource {\n resource: RawEmbeddedResource {\n resource: resource_contents,\n }\n .optional_annotate(annotations),\n },\n }\n }\n}\n\n/// A template for a prompt\n#[derive(Debug, Serialize, Deserialize)]\npub struct PromptTemplate {\n pub id: String,\n pub template: String,\n pub arguments: Vec,\n}\n\n/// A template for a prompt argument, this should be identical to PromptArgument\n#[derive(Debug, Serialize, Deserialize)]\npub struct PromptArgumentTemplate {\n pub name: String,\n pub description: Option,\n pub required: Option,\n}\n"], ["/rust-sdk/crates/rmcp/src/model/tool.rs", "use std::{borrow::Cow, sync::Arc};\n\n/// Tools represent a routine that a server can execute\n/// Tool calls represent requests from the client to execute one\nuse serde::{Deserialize, Serialize};\nuse serde_json::Value;\n\nuse super::JsonObject;\n\n/// A tool that can be used by a model.\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Tool {\n /// The name of the tool\n pub name: Cow<'static, str>,\n /// A description of what the tool does\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n /// A JSON Schema object defining the expected parameters for the tool\n pub input_schema: Arc,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n /// Optional additional tool information.\n pub annotations: Option,\n}\n\n/// Additional properties describing a Tool to clients.\n///\n/// NOTE: all properties in ToolAnnotations are **hints**.\n/// They are not guaranteed to provide a faithful description of\n/// tool behavior (including descriptive properties like `title`).\n///\n/// Clients should never make tool use decisions based on ToolAnnotations\n/// received from 
untrusted servers.\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ToolAnnotations {\n /// A human-readable title for the tool.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub title: Option,\n\n /// If true, the tool does not modify its environment.\n ///\n /// Default: false\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub read_only_hint: Option,\n\n /// If true, the tool may perform destructive updates to its environment.\n /// If false, the tool performs only additive updates.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: true\n /// A human-readable description of the tool's purpose.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub destructive_hint: Option,\n\n /// If true, calling the tool repeatedly with the same arguments\n /// will have no additional effect on the its environment.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: false.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub idempotent_hint: Option,\n\n /// If true, this tool may interact with an \"open world\" of external\n /// entities. 
If false, the tool's domain of interaction is closed.\n /// For example, the world of a web search tool is open, whereas that\n /// of a memory tool is not.\n ///\n /// Default: true\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub open_world_hint: Option,\n}\n\nimpl ToolAnnotations {\n pub fn new() -> Self {\n Self::default()\n }\n pub fn with_title(title: T) -> Self\n where\n T: Into,\n {\n ToolAnnotations {\n title: Some(title.into()),\n ..Self::default()\n }\n }\n pub fn read_only(self, read_only: bool) -> Self {\n ToolAnnotations {\n read_only_hint: Some(read_only),\n ..self\n }\n }\n pub fn destructive(self, destructive: bool) -> Self {\n ToolAnnotations {\n destructive_hint: Some(destructive),\n ..self\n }\n }\n pub fn idempotent(self, idempotent: bool) -> Self {\n ToolAnnotations {\n idempotent_hint: Some(idempotent),\n ..self\n }\n }\n pub fn open_world(self, open_world: bool) -> Self {\n ToolAnnotations {\n open_world_hint: Some(open_world),\n ..self\n }\n }\n\n /// If not set, defaults to true.\n pub fn is_destructive(&self) -> bool {\n self.destructive_hint.unwrap_or(true)\n }\n\n /// If not set, defaults to false.\n pub fn is_idempotent(&self) -> bool {\n self.idempotent_hint.unwrap_or(false)\n }\n}\n\nimpl Tool {\n /// Create a new tool with the given name and description\n pub fn new(name: N, description: D, input_schema: S) -> Self\n where\n N: Into>,\n D: Into>,\n S: Into>,\n {\n Tool {\n name: name.into(),\n description: Some(description.into()),\n input_schema: input_schema.into(),\n annotations: None,\n }\n }\n\n pub fn annotate(self, annotations: ToolAnnotations) -> Self {\n Tool {\n annotations: Some(annotations),\n ..self\n }\n }\n\n /// Get the schema as json value\n pub fn schema_as_json_value(&self) -> Value {\n Value::Object(self.input_schema.as_ref().clone())\n }\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/tool_handler.rs", "use darling::{FromMeta, ast::NestedMeta};\nuse proc_macro2::TokenStream;\nuse quote::{ToTokens, 
quote};\nuse syn::{Expr, ImplItem, ItemImpl};\n\n#[derive(FromMeta)]\n#[darling(default)]\npub struct ToolHandlerAttribute {\n pub router: Expr,\n}\n\nimpl Default for ToolHandlerAttribute {\n fn default() -> Self {\n Self {\n router: syn::parse2(quote! {\n self.tool_router\n })\n .unwrap(),\n }\n }\n}\n\npub fn tool_handler(attr: TokenStream, input: TokenStream) -> syn::Result {\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n let ToolHandlerAttribute { router } = ToolHandlerAttribute::from_list(&attr_args)?;\n let mut item_impl = syn::parse2::(input.clone())?;\n let tool_call_fn = quote! {\n async fn call_tool(\n &self,\n request: rmcp::model::CallToolRequestParam,\n context: rmcp::service::RequestContext,\n ) -> Result {\n let tcc = rmcp::handler::server::tool::ToolCallContext::new(self, request, context);\n #router.call(tcc).await\n }\n };\n let tool_list_fn = quote! {\n async fn list_tools(\n &self,\n _request: Option,\n _context: rmcp::service::RequestContext,\n ) -> Result {\n Ok(rmcp::model::ListToolsResult::with_all_items(#router.list_all()))\n }\n };\n let tool_call_fn = syn::parse2::(tool_call_fn)?;\n let tool_list_fn = syn::parse2::(tool_list_fn)?;\n item_impl.items.push(tool_call_fn);\n item_impl.items.push(tool_list_fn);\n Ok(item_impl.into_token_stream())\n}\n"], ["/rust-sdk/crates/rmcp/src/error.rs", "use std::{borrow::Cow, fmt::Display};\n\nuse crate::ServiceError;\npub use crate::model::ErrorData;\n#[deprecated(\n note = \"Use `rmcp::ErrorData` instead, `rmcp::ErrorData` could become `RmcpError` in the future.\"\n)]\npub type Error = ErrorData;\nimpl Display for ErrorData {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.code.0, self.message)?;\n if let Some(data) = &self.data {\n write!(f, \"({})\", data)?;\n }\n Ok(())\n }\n}\n\nimpl std::error::Error for ErrorData {}\n\n/// This is an unified error type for the errors could be returned by the service.\n#[derive(Debug, 
thiserror::Error)]\npub enum RmcpError {\n #[error(\"Service error: {0}\")]\n Service(#[from] ServiceError),\n #[cfg(feature = \"client\")]\n #[error(\"Client initialization error: {0}\")]\n ClientInitialize(#[from] crate::service::ClientInitializeError),\n #[cfg(feature = \"server\")]\n #[error(\"Server initialization error: {0}\")]\n ServerInitialize(#[from] crate::service::ServerInitializeError),\n #[error(\"Runtime error: {0}\")]\n Runtime(#[from] tokio::task::JoinError),\n #[error(\"Transport creation error: {error}\")]\n // TODO: Maybe we can introduce something like `TryIntoTransport` to auto wrap transport type,\n // but it could be an breaking change, so we could do it in the future.\n TransportCreation {\n into_transport_type_name: Cow<'static, str>,\n into_transport_type_id: std::any::TypeId,\n #[source]\n error: Box,\n },\n // and cancellation shouldn't be an error?\n}\n\nimpl RmcpError {\n pub fn transport_creation(\n error: impl Into>,\n ) -> Self {\n RmcpError::TransportCreation {\n into_transport_type_id: std::any::TypeId::of::(),\n into_transport_type_name: std::any::type_name::().into(),\n error: error.into(),\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/resource.rs", "use serde::{Deserialize, Serialize};\n\nuse super::Annotated;\n\n/// Represents a resource in the extension with metadata\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawResource {\n /// URI representing the resource location (e.g., \"file:///path/to/file\" or \"str:///content\")\n pub uri: String,\n /// Name of the resource\n pub name: String,\n /// Optional description of the resource\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n /// MIME type of the resource content (\"text\" or \"blob\")\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub mime_type: Option,\n\n /// The size of the raw resource 
content, in bytes (i.e., before base64 encoding or any tokenization), if known.\n ///\n /// This can be used by Hosts to display file sizes and estimate context window us\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub size: Option,\n}\n\npub type Resource = Annotated;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawResourceTemplate {\n pub uri_template: String,\n pub name: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub mime_type: Option,\n}\n\npub type ResourceTemplate = Annotated;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\", untagged)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum ResourceContents {\n TextResourceContents {\n uri: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n mime_type: Option,\n text: String,\n },\n BlobResourceContents {\n uri: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n mime_type: Option,\n blob: String,\n },\n}\n\nimpl ResourceContents {\n pub fn text(text: impl Into, uri: impl Into) -> Self {\n Self::TextResourceContents {\n uri: uri.into(),\n mime_type: Some(\"text\".into()),\n text: text.into(),\n }\n }\n}\n\nimpl RawResource {\n /// Creates a new Resource from a URI with explicit mime type\n pub fn new(uri: impl Into, name: impl Into) -> Self {\n Self {\n uri: uri.into(),\n name: name.into(),\n description: None,\n mime_type: None,\n size: None,\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/serde_impl.rs", "use std::borrow::Cow;\n\nuse serde::{Deserialize, Serialize};\n\nuse super::{\n Extensions, Meta, Notification, NotificationNoParam, Request, RequestNoParam,\n RequestOptionalParam,\n};\n#[derive(Serialize, Deserialize)]\nstruct WithMeta<'a, P> {\n 
#[serde(skip_serializing_if = \"Option::is_none\")]\n _meta: Option>,\n #[serde(flatten)]\n _rest: P,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct Proxy<'a, M, P> {\n method: M,\n params: WithMeta<'a, P>,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct ProxyOptionalParam<'a, M, P> {\n method: M,\n params: Option>,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct ProxyNoParam {\n method: M,\n}\n\nimpl Serialize for Request\nwhere\n M: Serialize,\n R: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n Proxy::serialize(\n &Proxy {\n method: &self.method,\n params: WithMeta {\n _rest: &self.params,\n _meta,\n },\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M, R> Deserialize<'de> for Request\nwhere\n M: Deserialize<'de>,\n R: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = Proxy::deserialize(deserializer)?;\n let _meta = body.params._meta.map(|m| m.into_owned());\n let mut extensions = Extensions::new();\n if let Some(meta) = _meta {\n extensions.insert(meta);\n }\n Ok(Request {\n extensions,\n method: body.method,\n params: body.params._rest,\n })\n }\n}\n\nimpl Serialize for RequestOptionalParam\nwhere\n M: Serialize,\n R: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n Proxy::serialize(\n &Proxy {\n method: &self.method,\n params: WithMeta {\n _rest: &self.params,\n _meta,\n },\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M, R> Deserialize<'de> for RequestOptionalParam\nwhere\n M: Deserialize<'de>,\n R: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = ProxyOptionalParam::<'_, _, Option>::deserialize(deserializer)?;\n let mut params = None;\n let mut 
_meta = None;\n if let Some(body_params) = body.params {\n params = body_params._rest;\n _meta = body_params._meta.map(|m| m.into_owned());\n }\n let mut extensions = Extensions::new();\n if let Some(meta) = _meta {\n extensions.insert(meta);\n }\n Ok(RequestOptionalParam {\n extensions,\n method: body.method,\n params,\n })\n }\n}\n\nimpl Serialize for RequestNoParam\nwhere\n M: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n ProxyNoParam::serialize(\n &ProxyNoParam {\n method: &self.method,\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M> Deserialize<'de> for RequestNoParam\nwhere\n M: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = ProxyNoParam::<_>::deserialize(deserializer)?;\n let extensions = Extensions::new();\n Ok(RequestNoParam {\n extensions,\n method: body.method,\n })\n }\n}\n\nimpl Serialize for Notification\nwhere\n M: Serialize,\n R: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n Proxy::serialize(\n &Proxy {\n method: &self.method,\n params: WithMeta {\n _rest: &self.params,\n _meta,\n },\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M, R> Deserialize<'de> for Notification\nwhere\n M: Deserialize<'de>,\n R: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = Proxy::deserialize(deserializer)?;\n let _meta = body.params._meta.map(|m| m.into_owned());\n let mut extensions = Extensions::new();\n if let Some(meta) = _meta {\n extensions.insert(meta);\n }\n Ok(Notification {\n extensions,\n method: body.method,\n params: body.params._rest,\n })\n }\n}\n\nimpl Serialize for NotificationNoParam\nwhere\n M: Serialize,\n{\n fn serialize(&self, 
serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n ProxyNoParam::serialize(\n &ProxyNoParam {\n method: &self.method,\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M> Deserialize<'de> for NotificationNoParam\nwhere\n M: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = ProxyNoParam::<_>::deserialize(deserializer)?;\n let extensions = Extensions::new();\n Ok(NotificationNoParam {\n extensions,\n method: body.method,\n })\n }\n}\n\n#[cfg(test)]\nmod test {\n use serde_json::json;\n\n use crate::model::ListToolsRequest;\n\n #[test]\n fn test_deserialize_lost_tools_request() {\n let _req: ListToolsRequest = serde_json::from_value(json!(\n {\n \"method\": \"tools/list\",\n }\n ))\n .unwrap();\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/service/tower.rs", "use std::{future::poll_fn, marker::PhantomData};\n\nuse tower_service::Service as TowerService;\n\nuse super::NotificationContext;\nuse crate::service::{RequestContext, Service, ServiceRole};\n\npub struct TowerHandler {\n pub service: S,\n pub info: R::Info,\n role: PhantomData,\n}\n\nimpl TowerHandler {\n pub fn new(service: S, info: R::Info) -> Self {\n Self {\n service,\n role: PhantomData,\n info,\n }\n }\n}\n\nimpl Service for TowerHandler\nwhere\n S: TowerService + Sync + Send + Clone + 'static,\n S::Error: Into,\n S::Future: Send,\n{\n async fn handle_request(\n &self,\n request: R::PeerReq,\n _context: RequestContext,\n ) -> Result {\n let mut service = self.service.clone();\n poll_fn(|cx| service.poll_ready(cx))\n .await\n .map_err(Into::into)?;\n let resp = service.call(request).await.map_err(Into::into)?;\n Ok(resp)\n }\n\n fn handle_notification(\n &self,\n _notification: R::PeerNot,\n _context: NotificationContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(()))\n }\n\n fn get_info(&self) -> R::Info {\n self.info.clone()\n 
}\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/session/never.rs", "use futures::Stream;\nuse thiserror::Error;\n\nuse super::{ServerSseMessage, SessionId, SessionManager};\nuse crate::{\n RoleServer,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::Transport,\n};\n\n#[derive(Debug, Clone, Error)]\n#[error(\"Session management is not supported\")]\npub struct ErrorSessionManagementNotSupported;\n#[derive(Debug, Clone, Default)]\npub struct NeverSessionManager {}\npub enum NeverTransport {}\nimpl Transport for NeverTransport {\n type Error = ErrorSessionManagementNotSupported;\n\n fn send(\n &mut self,\n _item: ServerJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn receive(&mut self) -> impl Future> {\n futures::future::ready(None)\n }\n\n async fn close(&mut self) -> Result<(), Self::Error> {\n Err(ErrorSessionManagementNotSupported)\n }\n}\n\nimpl SessionManager for NeverSessionManager {\n type Error = ErrorSessionManagementNotSupported;\n type Transport = NeverTransport;\n\n fn create_session(\n &self,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn initialize_session(\n &self,\n _id: &SessionId,\n _message: ClientJsonRpcMessage,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn has_session(\n &self,\n _id: &SessionId,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn close_session(\n &self,\n _id: &SessionId,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn create_stream(\n &self,\n _id: &SessionId,\n _message: ClientJsonRpcMessage,\n ) -> impl Future<\n Output = Result + Send + 'static, Self::Error>,\n > + Send {\n futures::future::ready(Result::, _>::Err(\n ErrorSessionManagementNotSupported,\n ))\n }\n fn 
create_standalone_stream(\n &self,\n _id: &SessionId,\n ) -> impl Future<\n Output = Result + Send + 'static, Self::Error>,\n > + Send {\n futures::future::ready(Result::, _>::Err(\n ErrorSessionManagementNotSupported,\n ))\n }\n fn resume(\n &self,\n _id: &SessionId,\n _last_event_id: String,\n ) -> impl Future<\n Output = Result + Send + 'static, Self::Error>,\n > + Send {\n futures::future::ready(Result::, _>::Err(\n ErrorSessionManagementNotSupported,\n ))\n }\n fn accept_message(\n &self,\n _id: &SessionId,\n _message: ClientJsonRpcMessage,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/sink_stream.rs", "use std::sync::Arc;\n\nuse futures::{Sink, Stream};\nuse tokio::sync::Mutex;\n\nuse super::{IntoTransport, Transport};\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\npub struct SinkStreamTransport {\n stream: St,\n sink: Arc>,\n}\n\nimpl SinkStreamTransport {\n pub fn new(sink: Si, stream: St) -> Self {\n Self {\n stream,\n sink: Arc::new(Mutex::new(sink)),\n }\n }\n}\n\nimpl Transport for SinkStreamTransport\nwhere\n St: Send + Stream> + Unpin,\n Si: Send + Sink> + Unpin + 'static,\n Si::Error: std::error::Error + Send + Sync + 'static,\n{\n type Error = Si::Error;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n use futures::SinkExt;\n let lock = self.sink.clone();\n async move {\n let mut write = lock.lock().await;\n write.send(item).await\n }\n }\n\n fn receive(&mut self) -> impl Future>> {\n use futures::StreamExt;\n self.stream.next()\n }\n\n async fn close(&mut self) -> Result<(), Self::Error> {\n Ok(())\n }\n}\n\npub enum TransportAdapterSinkStream {}\n\nimpl IntoTransport for (Si, St)\nwhere\n Role: ServiceRole,\n Si: Send + Sink> + Unpin + 'static,\n St: Send + Stream> + Unpin + 'static,\n Si::Error: std::error::Error + Send + Sync + 'static,\n{\n fn into_transport(self) -> impl 
Transport + 'static {\n SinkStreamTransport::new(self.0, self.1)\n }\n}\n\npub enum TransportAdapterAsyncCombinedRW {}\nimpl IntoTransport for S\nwhere\n Role: ServiceRole,\n S: Sink> + Stream> + Send + 'static,\n S::Error: std::error::Error + Send + Sync + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static {\n use futures::StreamExt;\n IntoTransport::::into_transport(self.split())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/wrapper/json.rs", "use serde::Serialize;\n\nuse crate::model::IntoContents;\n\n/// Json wrapper\n///\n/// This is used to tell the SDK to serialize the inner value into json\npub struct Json(pub T);\n\nimpl IntoContents for Json\nwhere\n T: Serialize,\n{\n fn into_contents(self) -> Vec {\n let result = crate::model::Content::json(self.0);\n debug_assert!(\n result.is_ok(),\n \"Json wrapped content should be able to serialized into json\"\n );\n match result {\n Ok(content) => vec![content],\n Err(e) => {\n tracing::error!(\"failed to convert json content: {e}\");\n vec![]\n }\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/session.rs", "use futures::Stream;\n\npub use crate::transport::common::server_side_http::SessionId;\nuse crate::{\n RoleServer,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::common::server_side_http::ServerSseMessage,\n};\n\npub mod local;\npub mod never;\n\npub trait SessionManager: Send + Sync + 'static {\n type Error: std::error::Error + Send + 'static;\n type Transport: crate::transport::Transport;\n /// Create a new session with the given id and configuration.\n fn create_session(\n &self,\n ) -> impl Future> + Send;\n fn initialize_session(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> impl Future> + Send;\n fn has_session(&self, id: &SessionId)\n -> impl Future> + Send;\n fn close_session(&self, id: &SessionId)\n -> impl Future> + Send;\n fn create_stream(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> 
impl Future<\n Output = Result + Send + Sync + 'static, Self::Error>,\n > + Send;\n fn accept_message(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> impl Future> + Send;\n fn create_standalone_stream(\n &self,\n id: &SessionId,\n ) -> impl Future<\n Output = Result + Send + Sync + 'static, Self::Error>,\n > + Send;\n fn resume(\n &self,\n id: &SessionId,\n last_event_id: String,\n ) -> impl Future<\n Output = Result + Send + Sync + 'static, Self::Error>,\n > + Send;\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common.rs", "#[cfg(any(\n feature = \"transport-streamable-http-server\",\n feature = \"transport-sse-server\"\n))]\npub mod server_side_http;\n\npub mod http_header;\n\n#[cfg(feature = \"__reqwest\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"reqwest\")))]\nmod reqwest;\n\n#[cfg(feature = \"client-side-sse\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client-side-sse\")))]\npub mod client_side_sse;\n\n#[cfg(feature = \"auth\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\npub mod auth;\n"], ["/rust-sdk/crates/rmcp/src/transport/io.rs", "/// # StdIO Transport\n///\n/// Create a pair of [`tokio::io::Stdin`] and [`tokio::io::Stdout`].\npub fn stdio() -> (tokio::io::Stdin, tokio::io::Stdout) {\n (tokio::io::stdin(), tokio::io::stdout())\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server.rs", "pub mod session;\n#[cfg(feature = \"transport-streamable-http-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server\")))]\npub mod tower;\npub use session::{SessionId, SessionManager};\n#[cfg(feature = \"transport-streamable-http-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server\")))]\npub use tower::{StreamableHttpServerConfig, StreamableHttpService};\n"], ["/rust-sdk/crates/rmcp/src/transport/common/auth.rs", "#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\nmod 
streamable_http_client;\n\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\nmod sse_client;\n"], ["/rust-sdk/crates/rmcp/src/transport/common/reqwest.rs", "#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\nmod streamable_http_client;\n\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\nmod sse_client;\n"], ["/rust-sdk/crates/rmcp/src/transport/common/http_header.rs", "pub const HEADER_SESSION_ID: &str = \"Mcp-Session-Id\";\npub const HEADER_LAST_EVENT_ID: &str = \"Last-Event-Id\";\npub const EVENT_STREAM_MIME_TYPE: &str = \"text/event-stream\";\npub const JSON_MIME_TYPE: &str = \"application/json\";\n"], ["/rust-sdk/crates/rmcp/src/handler.rs", "#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub mod client;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub mod server;\n"], ["/rust-sdk/crates/rmcp/src/handler/server/resource.rs", "\n"], ["/rust-sdk/crates/rmcp/src/transport/ws.rs", "// Maybe we don't really need a ws implementation?\n"], ["/rust-sdk/crates/rmcp/src/handler/server/wrapper.rs", "mod json;\npub use json::*;\n"], ["/rust-sdk/crates/rmcp/src/handler/server/router/prompt.rs", ""]], "task_instance_info": {"created_time": "2025-08-21 01:42:51", "created_task_model": "DeepSeek-R1", "function_skeleton": "pub async fn get_access_token (&self) -> Result {}", "code_description": "This code segment functions to: Check the current authentication state and conditionally retrieve an access token while enforcing proper authorization workflow transitions"}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\n pub async fn get_access_token(&self) -> Result {\n match self {\n OAuthState::Unauthorized(_) => Err(AuthError::AuthorizationRequired),\n 
OAuthState::Session(_) => Err(AuthError::AuthorizationRequired),\n OAuthState::Authorized(manager) => manager.get_access_token().await,\n OAuthState::AuthorizedHttpClient(client) => client.auth_manager.get_access_token().await,\n }\n }\n[TASK_END]\n```", "inference_time": "2025-08-21 01-43-08"}, "editdistance_info": {"edit_distance": 46.087, "calculate_time": "2025-08-21 01:43:08", "true_code_clean": "pub async fn get_access_token(&self) -> Result {\n match self {\n OAuthState::Unauthorized(manager) => manager.get_access_token().await,\n OAuthState::Session(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }", "predict_code_clean": "pub async fn get_access_token(&self) -> Result {\n match self {\n OAuthState::Unauthorized(_) => Err(AuthError::AuthorizationRequired),\n OAuthState::Session(_) => Err(AuthError::AuthorizationRequired),\n OAuthState::Authorized(manager) => manager.get_access_token().await,\n OAuthState::AuthorizedHttpClient(client) => client.auth_manager.get_access_token().await,\n }\n }"}} {"repo_name": "rust-sdk", "file_name": "/rust-sdk/crates/rmcp/src/transport/common/reqwest/sse_client.rs", "inference_info": {"prefix_code": "use std::sync::Arc;\n\nuse futures::StreamExt;\nuse http::Uri;\nuse reqwest::header::ACCEPT;\nuse sse_stream::SseStream;\n\nuse crate::transport::{\n SseClientTransport,\n common::http_header::{EVENT_STREAM_MIME_TYPE, HEADER_LAST_EVENT_ID},\n sse_client::{SseClient, SseClientConfig, SseTransportError},\n};\n\nimpl SseClient for reqwest::Client {\n type Error = reqwest::Error;\n\n ", "suffix_code": "\n\n async fn get_stream(\n &self,\n uri: Uri,\n last_event_id: Option,\n auth_token: Option,\n ) -> Result<\n crate::transport::common::client_side_sse::BoxedSseResponse,\n 
SseTransportError,\n > {\n let mut request_builder = self\n .get(uri.to_string())\n .header(ACCEPT, EVENT_STREAM_MIME_TYPE);\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n if let Some(last_event_id) = last_event_id {\n request_builder = request_builder.header(HEADER_LAST_EVENT_ID, last_event_id);\n }\n let response = request_builder.send().await?;\n let response = response.error_for_status()?;\n match response.headers().get(reqwest::header::CONTENT_TYPE) {\n Some(ct) => {\n if !ct.as_bytes().starts_with(EVENT_STREAM_MIME_TYPE.as_bytes()) {\n return Err(SseTransportError::UnexpectedContentType(Some(ct.clone())));\n }\n }\n None => {\n return Err(SseTransportError::UnexpectedContentType(None));\n }\n }\n let event_stream = SseStream::from_byte_stream(response.bytes_stream()).boxed();\n Ok(event_stream)\n }\n}\n\nimpl SseClientTransport {\n pub async fn start(\n uri: impl Into>,\n ) -> Result> {\n SseClientTransport::start_with_client(\n reqwest::Client::default(),\n SseClientConfig {\n sse_endpoint: uri.into(),\n ..Default::default()\n },\n )\n .await\n }\n}\n", "middle_code": "async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n auth_token: Option,\n ) -> Result<(), SseTransportError> {\n let mut request_builder = self.post(uri.to_string()).json(&message);\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n request_builder\n .send()\n .await\n .and_then(|resp| resp.error_for_status())\n .map_err(SseTransportError::from)\n .map(drop)\n }", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/rust-sdk/crates/rmcp/src/transport/common/reqwest/streamable_http_client.rs", "use std::sync::Arc;\n\nuse futures::{StreamExt, stream::BoxStream};\nuse reqwest::header::ACCEPT;\nuse sse_stream::{Sse, SseStream};\n\nuse crate::{\n 
model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::{\n common::http_header::{\n EVENT_STREAM_MIME_TYPE, HEADER_LAST_EVENT_ID, HEADER_SESSION_ID, JSON_MIME_TYPE,\n },\n streamable_http_client::*,\n },\n};\n\nimpl StreamableHttpClient for reqwest::Client {\n type Error = reqwest::Error;\n\n async fn get_stream(\n &self,\n uri: Arc,\n session_id: Arc,\n last_event_id: Option,\n auth_token: Option,\n ) -> Result>, StreamableHttpError> {\n let mut request_builder = self\n .get(uri.as_ref())\n .header(ACCEPT, EVENT_STREAM_MIME_TYPE)\n .header(HEADER_SESSION_ID, session_id.as_ref());\n if let Some(last_event_id) = last_event_id {\n request_builder = request_builder.header(HEADER_LAST_EVENT_ID, last_event_id);\n }\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n let response = request_builder.send().await?;\n if response.status() == reqwest::StatusCode::METHOD_NOT_ALLOWED {\n return Err(StreamableHttpError::SeverDoesNotSupportSse);\n }\n let response = response.error_for_status()?;\n match response.headers().get(reqwest::header::CONTENT_TYPE) {\n Some(ct) => {\n if !ct.as_bytes().starts_with(EVENT_STREAM_MIME_TYPE.as_bytes()) {\n return Err(StreamableHttpError::UnexpectedContentType(Some(\n String::from_utf8_lossy(ct.as_bytes()).to_string(),\n )));\n }\n }\n None => {\n return Err(StreamableHttpError::UnexpectedContentType(None));\n }\n }\n let event_stream = SseStream::from_byte_stream(response.bytes_stream()).boxed();\n Ok(event_stream)\n }\n\n async fn delete_session(\n &self,\n uri: Arc,\n session: Arc,\n auth_token: Option,\n ) -> Result<(), StreamableHttpError> {\n let mut request_builder = self.delete(uri.as_ref());\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n let response = request_builder\n .header(HEADER_SESSION_ID, session.as_ref())\n .send()\n .await?;\n\n // if method no allowed\n if response.status() == 
reqwest::StatusCode::METHOD_NOT_ALLOWED {\n tracing::debug!(\"this server doesn't support deleting session\");\n return Ok(());\n }\n let _response = response.error_for_status()?;\n Ok(())\n }\n\n async fn post_message(\n &self,\n uri: Arc,\n message: ClientJsonRpcMessage,\n session_id: Option>,\n auth_token: Option,\n ) -> Result> {\n let mut request = self\n .post(uri.as_ref())\n .header(ACCEPT, [EVENT_STREAM_MIME_TYPE, JSON_MIME_TYPE].join(\", \"));\n if let Some(auth_header) = auth_token {\n request = request.bearer_auth(auth_header);\n }\n if let Some(session_id) = session_id {\n request = request.header(HEADER_SESSION_ID, session_id.as_ref());\n }\n let response = request.json(&message).send().await?.error_for_status()?;\n if response.status() == reqwest::StatusCode::ACCEPTED {\n return Ok(StreamableHttpPostResponse::Accepted);\n }\n let content_type = response.headers().get(reqwest::header::CONTENT_TYPE);\n let session_id = response.headers().get(HEADER_SESSION_ID);\n let session_id = session_id\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_string());\n match content_type {\n Some(ct) if ct.as_bytes().starts_with(EVENT_STREAM_MIME_TYPE.as_bytes()) => {\n let event_stream = SseStream::from_byte_stream(response.bytes_stream()).boxed();\n Ok(StreamableHttpPostResponse::Sse(event_stream, session_id))\n }\n Some(ct) if ct.as_bytes().starts_with(JSON_MIME_TYPE.as_bytes()) => {\n let message: ServerJsonRpcMessage = response.json().await?;\n Ok(StreamableHttpPostResponse::Json(message, session_id))\n }\n _ => {\n // unexpected content type\n tracing::error!(\"unexpected content type: {:?}\", content_type);\n Err(StreamableHttpError::UnexpectedContentType(\n content_type.map(|ct| String::from_utf8_lossy(ct.as_bytes()).to_string()),\n ))\n }\n }\n }\n}\n\nimpl StreamableHttpClientTransport {\n pub fn from_uri(uri: impl Into>) -> Self {\n StreamableHttpClientTransport::with_client(\n reqwest::Client::default(),\n StreamableHttpClientTransportConfig {\n uri: 
uri.into(),\n ..Default::default()\n },\n )\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/sse_client.rs", "//! reference: https://html.spec.whatwg.org/multipage/server-sent-events.html\nuse std::{pin::Pin, sync::Arc};\n\nuse futures::{StreamExt, future::BoxFuture};\nuse http::Uri;\nuse reqwest::header::HeaderValue;\nuse sse_stream::Error as SseError;\nuse thiserror::Error;\n\nuse super::{\n Transport,\n common::client_side_sse::{BoxedSseResponse, SseRetryPolicy, SseStreamReconnect},\n};\nuse crate::{\n RoleClient,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::common::client_side_sse::SseAutoReconnectStream,\n};\n\n#[derive(Error, Debug)]\npub enum SseTransportError {\n #[error(\"SSE error: {0}\")]\n Sse(#[from] SseError),\n #[error(\"IO error: {0}\")]\n Io(#[from] std::io::Error),\n #[error(\"Client error: {0}\")]\n Client(E),\n #[error(\"unexpected end of stream\")]\n UnexpectedEndOfStream,\n #[error(\"Unexpected content type: {0:?}\")]\n UnexpectedContentType(Option),\n #[cfg(feature = \"auth\")]\n #[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\n #[error(\"Auth error: {0}\")]\n Auth(#[from] crate::transport::auth::AuthError),\n #[error(\"Invalid uri: {0}\")]\n InvalidUri(#[from] http::uri::InvalidUri),\n #[error(\"Invalid uri parts: {0}\")]\n InvalidUriParts(#[from] http::uri::InvalidUriParts),\n}\n\nimpl From for SseTransportError {\n fn from(e: reqwest::Error) -> Self {\n SseTransportError::Client(e)\n }\n}\n\npub trait SseClient: Clone + Send + Sync + 'static {\n type Error: std::error::Error + Send + Sync + 'static;\n fn post_message(\n &self,\n uri: Uri,\n message: ClientJsonRpcMessage,\n auth_token: Option,\n ) -> impl Future>> + Send + '_;\n fn get_stream(\n &self,\n uri: Uri,\n last_event_id: Option,\n auth_token: Option,\n ) -> impl Future>> + Send + '_;\n}\n\nstruct SseClientReconnect {\n pub client: C,\n pub uri: Uri,\n}\n\nimpl SseStreamReconnect for SseClientReconnect {\n type Error = SseTransportError;\n type Future = 
BoxFuture<'static, Result>;\n fn retry_connection(&mut self, last_event_id: Option<&str>) -> Self::Future {\n let client = self.client.clone();\n let uri = self.uri.clone();\n let last_event_id = last_event_id.map(|s| s.to_owned());\n Box::pin(async move { client.get_stream(uri, last_event_id, None).await })\n }\n}\ntype ServerMessageStream = Pin>>>;\npub struct SseClientTransport {\n client: C,\n config: SseClientConfig,\n message_endpoint: Uri,\n stream: Option>,\n}\n\nimpl Transport for SseClientTransport {\n type Error = SseTransportError;\n async fn receive(&mut self) -> Option {\n self.stream.as_mut()?.next().await?.ok()\n }\n fn send(\n &mut self,\n item: crate::service::TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let client = self.client.clone();\n let uri = self.message_endpoint.clone();\n async move { client.post_message(uri, item, None).await }\n }\n async fn close(&mut self) -> Result<(), Self::Error> {\n self.stream.take();\n Ok(())\n }\n}\n\nimpl std::fmt::Debug for SseClientTransport {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"SseClientWorker\")\n .field(\"client\", &self.client)\n .field(\"config\", &self.config)\n .finish()\n }\n}\n\nimpl SseClientTransport {\n pub async fn start_with_client(\n client: C,\n config: SseClientConfig,\n ) -> Result> {\n let sse_endpoint = config.sse_endpoint.as_ref().parse::()?;\n\n let mut sse_stream = client.get_stream(sse_endpoint.clone(), None, None).await?;\n let message_endpoint = if let Some(endpoint) = config.use_message_endpoint.clone() {\n let ep = endpoint.parse::()?;\n let mut sse_endpoint_parts = sse_endpoint.clone().into_parts();\n sse_endpoint_parts.path_and_query = ep.into_parts().path_and_query;\n Uri::from_parts(sse_endpoint_parts)?\n } else {\n // wait the endpoint event\n loop {\n let sse = sse_stream\n .next()\n .await\n .ok_or(SseTransportError::UnexpectedEndOfStream)??;\n let Some(\"endpoint\") = sse.event.as_deref() else {\n 
continue;\n };\n let ep = sse.data.unwrap_or_default();\n\n break message_endpoint(sse_endpoint.clone(), ep)?;\n }\n };\n\n let stream = Box::pin(SseAutoReconnectStream::new(\n sse_stream,\n SseClientReconnect {\n client: client.clone(),\n uri: sse_endpoint.clone(),\n },\n config.retry_policy.clone(),\n ));\n Ok(Self {\n client,\n config,\n message_endpoint,\n stream: Some(stream),\n })\n }\n}\n\nfn message_endpoint(base: http::Uri, endpoint: String) -> Result {\n // If endpoint is a full URL, parse and return it directly\n if endpoint.starts_with(\"http://\") || endpoint.starts_with(\"https://\") {\n return endpoint.parse::();\n }\n\n let mut base_parts = base.into_parts();\n let endpoint_clone = endpoint.clone();\n\n if endpoint.starts_with(\"?\") {\n // Query only - keep base path and append query\n if let Some(base_path_and_query) = &base_parts.path_and_query {\n let base_path = base_path_and_query.path();\n base_parts.path_and_query = Some(format!(\"{}{}\", base_path, endpoint).parse()?);\n } else {\n base_parts.path_and_query = Some(format!(\"/{}\", endpoint).parse()?);\n }\n } else {\n // Path (with optional query) - replace entire path_and_query\n let path_to_use = if endpoint.starts_with(\"/\") {\n endpoint // Use absolute path as-is\n } else {\n format!(\"/{}\", endpoint) // Make relative path absolute\n };\n base_parts.path_and_query = Some(path_to_use.parse()?);\n }\n\n http::Uri::from_parts(base_parts).map_err(|_| endpoint_clone.parse::().unwrap_err())\n}\n\n#[derive(Debug, Clone)]\npub struct SseClientConfig {\n /// client sse endpoint\n ///\n /// # How this client resolve the message endpoint\n /// if sse_endpoint has this format: ``,\n /// then the message endpoint will be ``.\n ///\n /// For example, if you config the sse_endpoint as `http://example.com/some_path/sse`,\n /// and the server send the message endpoint event as `message?session_id=123`,\n /// then the message endpoint will be `http://example.com/message`.\n ///\n /// This follow the 
rules of JavaScript's [`new URL(url, base)`](https://developer.mozilla.org/zh-CN/docs/Web/API/URL/URL)\n pub sse_endpoint: Arc,\n pub retry_policy: Arc,\n /// if this is settled, the client will use this endpoint to send message and skip get the endpoint event\n pub use_message_endpoint: Option,\n}\n\nimpl Default for SseClientConfig {\n fn default() -> Self {\n Self {\n sse_endpoint: \"\".into(),\n retry_policy: Arc::new(super::common::client_side_sse::FixedInterval::default()),\n use_message_endpoint: None,\n }\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_message_endpoint() {\n let base_url = \"https://localhost/sse\".parse::().unwrap();\n\n // Query only\n let result = message_endpoint(base_url.clone(), \"?sessionId=x\".to_string()).unwrap();\n assert_eq!(result.to_string(), \"https://localhost/sse?sessionId=x\");\n\n // Relative path with query\n let result = message_endpoint(base_url.clone(), \"mypath?sessionId=x\".to_string()).unwrap();\n assert_eq!(result.to_string(), \"https://localhost/mypath?sessionId=x\");\n\n // Absolute path with query\n let result = message_endpoint(base_url.clone(), \"/xxx?sessionId=x\".to_string()).unwrap();\n assert_eq!(result.to_string(), \"https://localhost/xxx?sessionId=x\");\n\n // Full URL\n let result = message_endpoint(\n base_url.clone(),\n \"http://example.com/xxx?sessionId=x\".to_string(),\n )\n .unwrap();\n assert_eq!(result.to_string(), \"http://example.com/xxx?sessionId=x\");\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_client.rs", "use std::{borrow::Cow, sync::Arc, time::Duration};\n\nuse futures::{Stream, StreamExt, future::BoxFuture, stream::BoxStream};\npub use sse_stream::Error as SseError;\nuse sse_stream::Sse;\nuse thiserror::Error;\nuse tokio_util::sync::CancellationToken;\n\nuse super::common::client_side_sse::{ExponentialBackoff, SseRetryPolicy, SseStreamReconnect};\nuse crate::{\n RoleClient,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n 
transport::{\n common::client_side_sse::SseAutoReconnectStream,\n worker::{Worker, WorkerQuitReason, WorkerSendRequest, WorkerTransport},\n },\n};\n\ntype BoxedSseStream = BoxStream<'static, Result>;\n\n#[derive(Error, Debug)]\npub enum StreamableHttpError {\n #[error(\"SSE error: {0}\")]\n Sse(#[from] SseError),\n #[error(\"Io error: {0}\")]\n Io(#[from] std::io::Error),\n #[error(\"Client error: {0}\")]\n Client(E),\n #[error(\"unexpected end of stream\")]\n UnexpectedEndOfStream,\n #[error(\"unexpected server response: {0}\")]\n UnexpectedServerResponse(Cow<'static, str>),\n #[error(\"Unexpected content type: {0:?}\")]\n UnexpectedContentType(Option),\n #[error(\"Server does not support SSE\")]\n SeverDoesNotSupportSse,\n #[error(\"Server does not support delete session\")]\n SeverDoesNotSupportDeleteSession,\n #[error(\"Tokio join error: {0}\")]\n TokioJoinError(#[from] tokio::task::JoinError),\n #[error(\"Deserialize error: {0}\")]\n Deserialize(#[from] serde_json::Error),\n #[error(\"Transport channel closed\")]\n TransportChannelClosed,\n #[cfg(feature = \"auth\")]\n #[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\n #[error(\"Auth error: {0}\")]\n Auth(#[from] crate::transport::auth::AuthError),\n}\n\nimpl From for StreamableHttpError {\n fn from(e: reqwest::Error) -> Self {\n StreamableHttpError::Client(e)\n }\n}\n\npub enum StreamableHttpPostResponse {\n Accepted,\n Json(ServerJsonRpcMessage, Option),\n Sse(BoxedSseStream, Option),\n}\n\nimpl std::fmt::Debug for StreamableHttpPostResponse {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n Self::Accepted => write!(f, \"Accepted\"),\n Self::Json(arg0, arg1) => f.debug_tuple(\"Json\").field(arg0).field(arg1).finish(),\n Self::Sse(_, arg1) => f.debug_tuple(\"Sse\").field(arg1).finish(),\n }\n }\n}\n\nimpl StreamableHttpPostResponse {\n pub async fn expect_initialized(\n self,\n ) -> Result<(ServerJsonRpcMessage, Option), StreamableHttpError>\n where\n E: 
std::error::Error + Send + Sync + 'static,\n {\n match self {\n Self::Json(message, session_id) => Ok((message, session_id)),\n Self::Sse(mut stream, session_id) => {\n let event =\n stream\n .next()\n .await\n .ok_or(StreamableHttpError::UnexpectedServerResponse(\n \"empty sse stream\".into(),\n ))??;\n let message: ServerJsonRpcMessage =\n serde_json::from_str(&event.data.unwrap_or_default())?;\n Ok((message, session_id))\n }\n _ => Err(StreamableHttpError::UnexpectedServerResponse(\n \"expect initialized, accepted\".into(),\n )),\n }\n }\n\n pub fn expect_json(self) -> Result>\n where\n E: std::error::Error + Send + Sync + 'static,\n {\n match self {\n Self::Json(message, ..) => Ok(message),\n got => Err(StreamableHttpError::UnexpectedServerResponse(\n format!(\"expect json, got {got:?}\").into(),\n )),\n }\n }\n\n pub fn expect_accepted(self) -> Result<(), StreamableHttpError>\n where\n E: std::error::Error + Send + Sync + 'static,\n {\n match self {\n Self::Accepted => Ok(()),\n got => Err(StreamableHttpError::UnexpectedServerResponse(\n format!(\"expect accepted, got {got:?}\").into(),\n )),\n }\n }\n}\n\npub trait StreamableHttpClient: Clone + Send + 'static {\n type Error: std::error::Error + Send + Sync + 'static;\n fn post_message(\n &self,\n uri: Arc,\n message: ClientJsonRpcMessage,\n session_id: Option>,\n auth_header: Option,\n ) -> impl Future>>\n + Send\n + '_;\n fn delete_session(\n &self,\n uri: Arc,\n session_id: Arc,\n auth_header: Option,\n ) -> impl Future>> + Send + '_;\n fn get_stream(\n &self,\n uri: Arc,\n session_id: Arc,\n last_event_id: Option,\n auth_header: Option,\n ) -> impl Future<\n Output = Result<\n BoxStream<'static, Result>,\n StreamableHttpError,\n >,\n > + Send\n + '_;\n}\n\npub struct RetryConfig {\n pub max_times: Option,\n pub min_duration: Duration,\n}\n\nstruct StreamableHttpClientReconnect {\n pub client: C,\n pub session_id: Arc,\n pub uri: Arc,\n}\n\nimpl SseStreamReconnect for StreamableHttpClientReconnect {\n type 
Error = StreamableHttpError;\n type Future = BoxFuture<'static, Result>;\n fn retry_connection(&mut self, last_event_id: Option<&str>) -> Self::Future {\n let client = self.client.clone();\n let uri = self.uri.clone();\n let session_id = self.session_id.clone();\n let last_event_id = last_event_id.map(|s| s.to_owned());\n Box::pin(async move {\n client\n .get_stream(uri, session_id, last_event_id, None)\n .await\n })\n }\n}\n\n#[derive(Debug, Clone, Default)]\npub struct StreamableHttpClientWorker {\n pub client: C,\n pub config: StreamableHttpClientTransportConfig,\n}\n\nimpl StreamableHttpClientWorker {\n pub fn new_simple(url: impl Into>) -> Self {\n Self {\n client: C::default(),\n config: StreamableHttpClientTransportConfig {\n uri: url.into(),\n ..Default::default()\n },\n }\n }\n}\n\nimpl StreamableHttpClientWorker {\n pub fn new(client: C, config: StreamableHttpClientTransportConfig) -> Self {\n Self { client, config }\n }\n}\n\nimpl StreamableHttpClientWorker {\n async fn execute_sse_stream(\n sse_stream: impl Stream>>\n + Send\n + 'static,\n sse_worker_tx: tokio::sync::mpsc::Sender,\n close_on_response: bool,\n ct: CancellationToken,\n ) -> Result<(), StreamableHttpError> {\n let mut sse_stream = std::pin::pin!(sse_stream);\n loop {\n let message = tokio::select! {\n event = sse_stream.next() => {\n event\n }\n _ = ct.cancelled() => {\n tracing::debug!(\"cancelled\");\n break;\n }\n };\n let Some(message) = message.transpose()? 
else {\n break;\n };\n let is_response = matches!(message, ServerJsonRpcMessage::Response(_));\n let yield_result = sse_worker_tx.send(message).await;\n if yield_result.is_err() {\n tracing::trace!(\"streamable http transport worker dropped, exiting\");\n break;\n }\n if close_on_response && is_response {\n tracing::debug!(\"got response, closing sse stream\");\n break;\n }\n }\n Ok(())\n }\n}\n\nimpl Worker for StreamableHttpClientWorker {\n type Role = RoleClient;\n type Error = StreamableHttpError;\n fn err_closed() -> Self::Error {\n StreamableHttpError::TransportChannelClosed\n }\n fn err_join(e: tokio::task::JoinError) -> Self::Error {\n StreamableHttpError::TokioJoinError(e)\n }\n fn config(&self) -> super::worker::WorkerConfig {\n super::worker::WorkerConfig {\n name: Some(\"StreamableHttpClientWorker\".into()),\n channel_buffer_capacity: self.config.channel_buffer_capacity,\n }\n }\n async fn run(\n self,\n mut context: super::worker::WorkerContext,\n ) -> Result<(), WorkerQuitReason> {\n let channel_buffer_capacity = self.config.channel_buffer_capacity;\n let (sse_worker_tx, mut sse_worker_rx) =\n tokio::sync::mpsc::channel::(channel_buffer_capacity);\n let config = self.config.clone();\n let transport_task_ct = context.cancellation_token.clone();\n let _drop_guard = transport_task_ct.clone().drop_guard();\n let WorkerSendRequest {\n responder,\n message: initialize_request,\n } = context.recv_from_handler().await?;\n let _ = responder.send(Ok(()));\n let (message, session_id) = self\n .client\n .post_message(config.uri.clone(), initialize_request, None, None)\n .await\n .map_err(WorkerQuitReason::fatal_context(\"send initialize request\"))?\n .expect_initialized::()\n .await\n .map_err(WorkerQuitReason::fatal_context(\n \"process initialize response\",\n ))?;\n let session_id: Option> = if let Some(session_id) = session_id {\n Some(session_id.into())\n } else {\n if !self.config.allow_stateless {\n return Err(WorkerQuitReason::fatal(\n \"missing session 
id in initialize response\",\n \"process initialize response\",\n ));\n }\n None\n };\n // delete session when drop guard is dropped\n if let Some(session_id) = &session_id {\n let ct = transport_task_ct.clone();\n let client = self.client.clone();\n let session_id = session_id.clone();\n let url = config.uri.clone();\n tokio::spawn(async move {\n ct.cancelled().await;\n let delete_session_result =\n client.delete_session(url, session_id.clone(), None).await;\n match delete_session_result {\n Ok(_) => {\n tracing::info!(session_id = session_id.as_ref(), \"delete session success\")\n }\n Err(StreamableHttpError::SeverDoesNotSupportDeleteSession) => {\n tracing::info!(\n session_id = session_id.as_ref(),\n \"server doesn't support delete session\"\n )\n }\n Err(e) => {\n tracing::error!(\n session_id = session_id.as_ref(),\n \"fail to delete session: {e}\"\n );\n }\n };\n });\n }\n\n context.send_to_handler(message).await?;\n let initialized_notification = context.recv_from_handler().await?;\n // expect a initialized response\n self.client\n .post_message(\n config.uri.clone(),\n initialized_notification.message,\n session_id.clone(),\n None,\n )\n .await\n .map_err(WorkerQuitReason::fatal_context(\n \"send initialized notification\",\n ))?\n .expect_accepted::()\n .map_err(WorkerQuitReason::fatal_context(\n \"process initialized notification response\",\n ))?;\n let _ = initialized_notification.responder.send(Ok(()));\n enum Event {\n ClientMessage(WorkerSendRequest),\n ServerMessage(ServerJsonRpcMessage),\n StreamResult(Result<(), StreamableHttpError>),\n }\n let mut streams = tokio::task::JoinSet::new();\n if let Some(session_id) = &session_id {\n match self\n .client\n .get_stream(config.uri.clone(), session_id.clone(), None, None)\n .await\n {\n Ok(stream) => {\n let sse_stream = SseAutoReconnectStream::new(\n stream,\n StreamableHttpClientReconnect {\n client: self.client.clone(),\n session_id: session_id.clone(),\n uri: config.uri.clone(),\n },\n 
self.config.retry_config.clone(),\n );\n streams.spawn(Self::execute_sse_stream(\n sse_stream,\n sse_worker_tx.clone(),\n false,\n transport_task_ct.child_token(),\n ));\n tracing::debug!(\"got common stream\");\n }\n Err(StreamableHttpError::SeverDoesNotSupportSse) => {\n tracing::debug!(\"server doesn't support sse, skip common stream\");\n }\n Err(e) => {\n // fail to get common stream\n tracing::error!(\"fail to get common stream: {e}\");\n return Err(WorkerQuitReason::fatal(\n \"fail to get general purpose event stream\",\n \"get general purpose event stream\",\n ));\n }\n }\n }\n loop {\n let event = tokio::select! {\n _ = transport_task_ct.cancelled() => {\n tracing::debug!(\"cancelled\");\n return Err(WorkerQuitReason::Cancelled);\n }\n message = context.recv_from_handler() => {\n let message = message?;\n Event::ClientMessage(message)\n },\n message = sse_worker_rx.recv() => {\n let Some(message) = message else {\n tracing::trace!(\"transport dropped, exiting\");\n return Err(WorkerQuitReason::HandlerTerminated);\n };\n Event::ServerMessage(message)\n },\n terminated_stream = streams.join_next(), if !streams.is_empty() => {\n match terminated_stream {\n Some(result) => {\n Event::StreamResult(result.map_err(StreamableHttpError::TokioJoinError).and_then(std::convert::identity))\n }\n None => {\n continue\n }\n }\n }\n };\n match event {\n Event::ClientMessage(send_request) => {\n let WorkerSendRequest { message, responder } = send_request;\n let response = self\n .client\n .post_message(config.uri.clone(), message, session_id.clone(), None)\n .await;\n let send_result = match response {\n Err(e) => Err(e),\n Ok(StreamableHttpPostResponse::Accepted) => {\n tracing::trace!(\"client message accepted\");\n Ok(())\n }\n Ok(StreamableHttpPostResponse::Json(message, ..)) => {\n context.send_to_handler(message).await?;\n Ok(())\n }\n Ok(StreamableHttpPostResponse::Sse(stream, ..)) => {\n if let Some(session_id) = &session_id {\n let sse_stream = 
SseAutoReconnectStream::new(\n stream,\n StreamableHttpClientReconnect {\n client: self.client.clone(),\n session_id: session_id.clone(),\n uri: config.uri.clone(),\n },\n self.config.retry_config.clone(),\n );\n streams.spawn(Self::execute_sse_stream(\n sse_stream,\n sse_worker_tx.clone(),\n true,\n transport_task_ct.child_token(),\n ));\n } else {\n let sse_stream = SseAutoReconnectStream::never_reconnect(\n stream,\n StreamableHttpError::::UnexpectedEndOfStream,\n );\n streams.spawn(Self::execute_sse_stream(\n sse_stream,\n sse_worker_tx.clone(),\n true,\n transport_task_ct.child_token(),\n ));\n }\n tracing::trace!(\"got new sse stream\");\n Ok(())\n }\n };\n let _ = responder.send(send_result);\n }\n Event::ServerMessage(json_rpc_message) => {\n // send the message to the handler\n context.send_to_handler(json_rpc_message).await?;\n }\n Event::StreamResult(result) => {\n if result.is_err() {\n tracing::warn!(\n \"sse client event stream terminated with error: {:?}\",\n result\n );\n }\n }\n }\n }\n }\n}\n\npub type StreamableHttpClientTransport = WorkerTransport>;\n\nimpl StreamableHttpClientTransport {\n pub fn with_client(client: C, config: StreamableHttpClientTransportConfig) -> Self {\n let worker = StreamableHttpClientWorker::new(client, config);\n WorkerTransport::spawn(worker)\n }\n}\n#[derive(Debug, Clone)]\npub struct StreamableHttpClientTransportConfig {\n pub uri: Arc,\n pub retry_config: Arc,\n pub channel_buffer_capacity: usize,\n /// if true, the transport will not require a session to be established\n pub allow_stateless: bool,\n}\n\nimpl StreamableHttpClientTransportConfig {\n pub fn with_uri(uri: impl Into>) -> Self {\n Self {\n uri: uri.into(),\n ..Default::default()\n }\n }\n}\n\nimpl Default for StreamableHttpClientTransportConfig {\n fn default() -> Self {\n Self {\n uri: \"localhost\".into(),\n retry_config: Arc::new(ExponentialBackoff::default()),\n channel_buffer_capacity: 16,\n allow_stateless: true,\n }\n }\n}\n"], 
["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/tower.rs", "use std::{convert::Infallible, fmt::Display, sync::Arc, time::Duration};\n\nuse bytes::Bytes;\nuse futures::{StreamExt, future::BoxFuture};\nuse http::{Method, Request, Response, header::ALLOW};\nuse http_body::Body;\nuse http_body_util::{BodyExt, Full, combinators::BoxBody};\nuse tokio_stream::wrappers::ReceiverStream;\n\nuse super::session::SessionManager;\nuse crate::{\n RoleServer,\n model::{ClientJsonRpcMessage, ClientRequest, GetExtensions},\n serve_server,\n service::serve_directly,\n transport::{\n OneshotTransport, TransportAdapterIdentity,\n common::{\n http_header::{\n EVENT_STREAM_MIME_TYPE, HEADER_LAST_EVENT_ID, HEADER_SESSION_ID, JSON_MIME_TYPE,\n },\n server_side_http::{\n BoxResponse, ServerSseMessage, accepted_response, expect_json,\n internal_error_response, sse_stream_response, unexpected_message_response,\n },\n },\n },\n};\n\n#[derive(Debug, Clone)]\npub struct StreamableHttpServerConfig {\n /// The ping message duration for SSE connections.\n pub sse_keep_alive: Option,\n /// If true, the server will create a session for each request and keep it alive.\n pub stateful_mode: bool,\n}\n\nimpl Default for StreamableHttpServerConfig {\n fn default() -> Self {\n Self {\n sse_keep_alive: Some(Duration::from_secs(15)),\n stateful_mode: true,\n }\n }\n}\n\npub struct StreamableHttpService {\n pub config: StreamableHttpServerConfig,\n session_manager: Arc,\n service_factory: Arc Result + Send + Sync>,\n}\n\nimpl Clone for StreamableHttpService {\n fn clone(&self) -> Self {\n Self {\n config: self.config.clone(),\n session_manager: self.session_manager.clone(),\n service_factory: self.service_factory.clone(),\n }\n }\n}\n\nimpl tower_service::Service> for StreamableHttpService\nwhere\n RequestBody: Body + Send + 'static,\n S: crate::Service,\n M: SessionManager,\n RequestBody::Error: Display,\n RequestBody::Data: Send + 'static,\n{\n type Response = BoxResponse;\n type Error = 
Infallible;\n type Future = BoxFuture<'static, Result>;\n fn call(&mut self, req: http::Request) -> Self::Future {\n let service = self.clone();\n Box::pin(async move {\n let response = service.handle(req).await;\n Ok(response)\n })\n }\n fn poll_ready(\n &mut self,\n _cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n std::task::Poll::Ready(Ok(()))\n }\n}\n\nimpl StreamableHttpService\nwhere\n S: crate::Service + Send + 'static,\n M: SessionManager,\n{\n pub fn new(\n service_factory: impl Fn() -> Result + Send + Sync + 'static,\n session_manager: Arc,\n config: StreamableHttpServerConfig,\n ) -> Self {\n Self {\n config,\n session_manager,\n service_factory: Arc::new(service_factory),\n }\n }\n fn get_service(&self) -> Result {\n (self.service_factory)()\n }\n pub async fn handle(&self, request: Request) -> Response>\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n let method = request.method().clone();\n let result = match method {\n Method::GET => self.handle_get(request).await,\n Method::POST => self.handle_post(request).await,\n Method::DELETE => self.handle_delete(request).await,\n _ => {\n // Handle other methods or return an error\n let response = Response::builder()\n .status(http::StatusCode::METHOD_NOT_ALLOWED)\n .header(ALLOW, \"GET, POST, DELETE\")\n .body(Full::new(Bytes::from(\"Method Not Allowed\")).boxed())\n .expect(\"valid response\");\n return response;\n }\n };\n match result {\n Ok(response) => response,\n Err(response) => response,\n }\n }\n async fn handle_get(&self, request: Request) -> Result\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n // check accept header\n if !request\n .headers()\n .get(http::header::ACCEPT)\n .and_then(|header| header.to_str().ok())\n .is_some_and(|header| header.contains(EVENT_STREAM_MIME_TYPE))\n {\n return Ok(Response::builder()\n .status(http::StatusCode::NOT_ACCEPTABLE)\n .body(\n Full::new(Bytes::from(\n \"Not Acceptable: Client must accept text/event-stream\",\n ))\n 
.boxed(),\n )\n .expect(\"valid response\"));\n }\n // check session id\n let session_id = request\n .headers()\n .get(HEADER_SESSION_ID)\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_owned().into());\n let Some(session_id) = session_id else {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session ID is required\")).boxed())\n .expect(\"valid response\"));\n };\n // check if session exists\n let has_session = self\n .session_manager\n .has_session(&session_id)\n .await\n .map_err(internal_error_response(\"check session\"))?;\n if !has_session {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session not found\")).boxed())\n .expect(\"valid response\"));\n }\n // check if last event id is provided\n let last_event_id = request\n .headers()\n .get(HEADER_LAST_EVENT_ID)\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_owned());\n if let Some(last_event_id) = last_event_id {\n // check if session has this event id\n let stream = self\n .session_manager\n .resume(&session_id, last_event_id)\n .await\n .map_err(internal_error_response(\"resume session\"))?;\n Ok(sse_stream_response(stream, self.config.sse_keep_alive))\n } else {\n // create standalone stream\n let stream = self\n .session_manager\n .create_standalone_stream(&session_id)\n .await\n .map_err(internal_error_response(\"create standalone stream\"))?;\n Ok(sse_stream_response(stream, self.config.sse_keep_alive))\n }\n }\n\n async fn handle_post(&self, request: Request) -> Result\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n // check accept header\n if !request\n .headers()\n .get(http::header::ACCEPT)\n .and_then(|header| header.to_str().ok())\n .is_some_and(|header| {\n header.contains(JSON_MIME_TYPE) && header.contains(EVENT_STREAM_MIME_TYPE)\n })\n {\n return Ok(Response::builder()\n 
.status(http::StatusCode::NOT_ACCEPTABLE)\n .body(Full::new(Bytes::from(\"Not Acceptable: Client must accept both application/json and text/event-stream\")).boxed())\n .expect(\"valid response\"));\n }\n\n // check content type\n if !request\n .headers()\n .get(http::header::CONTENT_TYPE)\n .and_then(|header| header.to_str().ok())\n .is_some_and(|header| header.starts_with(JSON_MIME_TYPE))\n {\n return Ok(Response::builder()\n .status(http::StatusCode::UNSUPPORTED_MEDIA_TYPE)\n .body(\n Full::new(Bytes::from(\n \"Unsupported Media Type: Content-Type must be application/json\",\n ))\n .boxed(),\n )\n .expect(\"valid response\"));\n }\n\n // json deserialize request body\n let (part, body) = request.into_parts();\n let mut message = match expect_json(body).await {\n Ok(message) => message,\n Err(response) => return Ok(response),\n };\n\n if self.config.stateful_mode {\n // do we have a session id?\n let session_id = part\n .headers\n .get(HEADER_SESSION_ID)\n .and_then(|v| v.to_str().ok());\n if let Some(session_id) = session_id {\n let session_id = session_id.to_owned().into();\n let has_session = self\n .session_manager\n .has_session(&session_id)\n .await\n .map_err(internal_error_response(\"check session\"))?;\n if !has_session {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session not found\")).boxed())\n .expect(\"valid response\"));\n }\n\n // inject request part to extensions\n match &mut message {\n ClientJsonRpcMessage::Request(req) => {\n req.request.extensions_mut().insert(part);\n }\n ClientJsonRpcMessage::Notification(not) => {\n not.notification.extensions_mut().insert(part);\n }\n _ => {\n // skip\n }\n }\n\n match message {\n ClientJsonRpcMessage::Request(_) => {\n let stream = self\n .session_manager\n .create_stream(&session_id, message)\n .await\n .map_err(internal_error_response(\"get session\"))?;\n Ok(sse_stream_response(stream, 
self.config.sse_keep_alive))\n }\n ClientJsonRpcMessage::Notification(_)\n | ClientJsonRpcMessage::Response(_)\n | ClientJsonRpcMessage::Error(_) => {\n // handle notification\n self.session_manager\n .accept_message(&session_id, message)\n .await\n .map_err(internal_error_response(\"accept message\"))?;\n Ok(accepted_response())\n }\n _ => Ok(Response::builder()\n .status(http::StatusCode::NOT_IMPLEMENTED)\n .body(\n Full::new(Bytes::from(\"Batch requests are not supported yet\")).boxed(),\n )\n .expect(\"valid response\")),\n }\n } else {\n let (session_id, transport) = self\n .session_manager\n .create_session()\n .await\n .map_err(internal_error_response(\"create session\"))?;\n if let ClientJsonRpcMessage::Request(req) = &mut message {\n if !matches!(req.request, ClientRequest::InitializeRequest(_)) {\n return Err(unexpected_message_response(\"initialize request\"));\n }\n // inject request part to extensions\n req.request.extensions_mut().insert(part);\n } else {\n return Err(unexpected_message_response(\"initialize request\"));\n }\n let service = self\n .get_service()\n .map_err(internal_error_response(\"get service\"))?;\n // spawn a task to serve the session\n tokio::spawn({\n let session_manager = self.session_manager.clone();\n let session_id = session_id.clone();\n async move {\n let service = serve_server::(\n service, transport,\n )\n .await;\n match service {\n Ok(service) => {\n // on service created\n let _ = service.waiting().await;\n }\n Err(e) => {\n tracing::error!(\"Failed to create service: {e}\");\n }\n }\n let _ = session_manager\n .close_session(&session_id)\n .await\n .inspect_err(|e| {\n tracing::error!(\"Failed to close session {session_id}: {e}\");\n });\n }\n });\n // get initialize response\n let response = self\n .session_manager\n .initialize_session(&session_id, message)\n .await\n .map_err(internal_error_response(\"create stream\"))?;\n let mut response = sse_stream_response(\n futures::stream::once({\n async move {\n 
ServerSseMessage {\n event_id: None,\n message: response.into(),\n }\n }\n }),\n self.config.sse_keep_alive,\n );\n\n response.headers_mut().insert(\n HEADER_SESSION_ID,\n session_id\n .parse()\n .map_err(internal_error_response(\"create session id header\"))?,\n );\n Ok(response)\n }\n } else {\n let service = self\n .get_service()\n .map_err(internal_error_response(\"get service\"))?;\n match message {\n ClientJsonRpcMessage::Request(mut request) => {\n request.request.extensions_mut().insert(part);\n let (transport, receiver) =\n OneshotTransport::::new(ClientJsonRpcMessage::Request(request));\n let service = serve_directly(service, transport, None);\n tokio::spawn(async move {\n // on service created\n let _ = service.waiting().await;\n });\n Ok(sse_stream_response(\n ReceiverStream::new(receiver).map(|message| {\n tracing::info!(?message);\n ServerSseMessage {\n event_id: None,\n message: message.into(),\n }\n }),\n self.config.sse_keep_alive,\n ))\n }\n ClientJsonRpcMessage::Notification(_notification) => {\n // ignore\n Ok(accepted_response())\n }\n ClientJsonRpcMessage::Response(_json_rpc_response) => Ok(accepted_response()),\n ClientJsonRpcMessage::Error(_json_rpc_error) => Ok(accepted_response()),\n _ => Ok(Response::builder()\n .status(http::StatusCode::NOT_IMPLEMENTED)\n .body(Full::new(Bytes::from(\"Batch requests are not supported yet\")).boxed())\n .expect(\"valid response\")),\n }\n }\n }\n\n async fn handle_delete(&self, request: Request) -> Result\n where\n B: Body + Send + 'static,\n B::Error: Display,\n {\n // check session id\n let session_id = request\n .headers()\n .get(HEADER_SESSION_ID)\n .and_then(|v| v.to_str().ok())\n .map(|s| s.to_owned().into());\n let Some(session_id) = session_id else {\n // unauthorized\n return Ok(Response::builder()\n .status(http::StatusCode::UNAUTHORIZED)\n .body(Full::new(Bytes::from(\"Unauthorized: Session ID is required\")).boxed())\n .expect(\"valid response\"));\n };\n // close session\n 
self.session_manager\n .close_session(&session_id)\n .await\n .map_err(internal_error_response(\"close session\"))?;\n Ok(accepted_response())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/auth.rs", "use std::{\n collections::HashMap,\n sync::Arc,\n time::{Duration, Instant},\n};\n\nuse oauth2::{\n AuthUrl, AuthorizationCode, ClientId, ClientSecret, CsrfToken, EmptyExtraTokenFields,\n PkceCodeChallenge, PkceCodeVerifier, RedirectUrl, RefreshToken, Scope, StandardTokenResponse,\n TokenResponse, TokenUrl,\n basic::{BasicClient, BasicTokenType},\n};\nuse reqwest::{Client as HttpClient, IntoUrl, StatusCode, Url, header::AUTHORIZATION};\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\nuse tokio::sync::{Mutex, RwLock};\nuse tracing::{debug, error};\n\n/// sse client with oauth2 authorization\n#[derive(Clone)]\npub struct AuthClient {\n pub http_client: C,\n pub auth_manager: Arc>,\n}\n\nimpl std::fmt::Debug for AuthClient {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"AuthorizedClient\")\n .field(\"http_client\", &self.http_client)\n .field(\"auth_manager\", &\"...\")\n .finish()\n }\n}\n\nimpl AuthClient {\n /// create new authorized sse client\n pub fn new(http_client: C, auth_manager: AuthorizationManager) -> Self {\n Self {\n http_client,\n auth_manager: Arc::new(Mutex::new(auth_manager)),\n }\n }\n}\n\nimpl AuthClient {\n pub fn get_access_token(&self) -> impl Future> + Send {\n let auth_manager = self.auth_manager.clone();\n async move { auth_manager.lock().await.get_access_token().await }\n }\n}\n\n/// Auth error\n#[derive(Debug, Error)]\npub enum AuthError {\n #[error(\"OAuth authorization required\")]\n AuthorizationRequired,\n\n #[error(\"OAuth authorization failed: {0}\")]\n AuthorizationFailed(String),\n\n #[error(\"OAuth token exchange failed: {0}\")]\n TokenExchangeFailed(String),\n\n #[error(\"OAuth token refresh failed: {0}\")]\n TokenRefreshFailed(String),\n\n #[error(\"HTTP error: {0}\")]\n 
HttpError(#[from] reqwest::Error),\n\n #[error(\"OAuth error: {0}\")]\n OAuthError(String),\n\n #[error(\"Metadata error: {0}\")]\n MetadataError(String),\n\n #[error(\"URL parse error: {0}\")]\n UrlError(#[from] url::ParseError),\n\n #[error(\"No authorization support detected\")]\n NoAuthorizationSupport,\n\n #[error(\"Internal error: {0}\")]\n InternalError(String),\n\n #[error(\"Invalid token type: {0}\")]\n InvalidTokenType(String),\n\n #[error(\"Token expired\")]\n TokenExpired,\n\n #[error(\"Invalid scope: {0}\")]\n InvalidScope(String),\n\n #[error(\"Registration failed: {0}\")]\n RegistrationFailed(String),\n}\n\n/// oauth2 metadata\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct AuthorizationMetadata {\n pub authorization_endpoint: String,\n pub token_endpoint: String,\n pub registration_endpoint: String,\n pub issuer: Option,\n pub jwks_uri: Option,\n pub scopes_supported: Option>,\n // allow additional fields\n #[serde(flatten)]\n pub additional_fields: HashMap,\n}\n\n/// oauth2 client config\n#[derive(Debug, Clone)]\npub struct OAuthClientConfig {\n pub client_id: String,\n pub client_secret: Option,\n pub scopes: Vec,\n pub redirect_uri: String,\n}\n\n// add type aliases for oauth2 types\ntype OAuthErrorResponse = oauth2::StandardErrorResponse;\ntype OAuthTokenResponse = StandardTokenResponse;\ntype OAuthTokenIntrospection =\n oauth2::StandardTokenIntrospectionResponse;\ntype OAuthRevocableToken = oauth2::StandardRevocableToken;\ntype OAuthRevocationError = oauth2::StandardErrorResponse;\ntype OAuthClient = oauth2::Client<\n OAuthErrorResponse,\n OAuthTokenResponse,\n OAuthTokenIntrospection,\n OAuthRevocableToken,\n OAuthRevocationError,\n oauth2::EndpointSet,\n oauth2::EndpointNotSet,\n oauth2::EndpointNotSet,\n oauth2::EndpointNotSet,\n oauth2::EndpointSet,\n>;\ntype Credentials = (String, Option);\n\n/// oauth2 auth manager\npub struct AuthorizationManager {\n http_client: HttpClient,\n metadata: Option,\n oauth_client: Option,\n 
credentials: RwLock>,\n pkce_verifier: RwLock>,\n expires_at: RwLock>,\n base_url: Url,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ClientRegistrationRequest {\n pub client_name: String,\n pub redirect_uris: Vec,\n pub grant_types: Vec,\n pub token_endpoint_auth_method: String,\n pub response_types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ClientRegistrationResponse {\n pub client_id: String,\n pub client_secret: Option,\n pub client_name: String,\n pub redirect_uris: Vec,\n // allow additional fields\n #[serde(flatten)]\n pub additional_fields: HashMap,\n}\n\nimpl AuthorizationManager {\n /// create new auth manager with base url\n pub async fn new(base_url: U) -> Result {\n let base_url = base_url.into_url()?;\n let http_client = HttpClient::builder()\n .timeout(Duration::from_secs(30))\n .build()\n .map_err(|e| AuthError::InternalError(e.to_string()))?;\n\n let manager = Self {\n http_client,\n metadata: None,\n oauth_client: None,\n credentials: RwLock::new(None),\n pkce_verifier: RwLock::new(None),\n expires_at: RwLock::new(None),\n base_url,\n };\n\n Ok(manager)\n }\n\n pub fn with_client(&mut self, http_client: HttpClient) -> Result<(), AuthError> {\n self.http_client = http_client;\n Ok(())\n }\n\n /// discover oauth2 metadata\n pub async fn discover_metadata(&self) -> Result {\n // according to the specification, the metadata should be located at \"/.well-known/oauth-authorization-server\"\n let mut discovery_url = self.base_url.clone();\n discovery_url.set_path(\"/.well-known/oauth-authorization-server\");\n debug!(\"discovery url: {:?}\", discovery_url);\n let response = self\n .http_client\n .get(discovery_url)\n .header(\"MCP-Protocol-Version\", \"2024-11-05\")\n .send()\n .await?;\n\n if response.status() == StatusCode::OK {\n let metadata = response\n .json::()\n .await\n .map_err(|e| {\n AuthError::MetadataError(format!(\"Failed to parse metadata: {}\", e))\n })?;\n debug!(\"metadata: {:?}\", 
metadata);\n Ok(metadata)\n } else {\n // fallback to default endpoints\n let mut auth_base = self.base_url.clone();\n // discard the path part, only keep scheme, host, port\n auth_base.set_path(\"\");\n\n Ok(AuthorizationMetadata {\n authorization_endpoint: format!(\"{}/authorize\", auth_base),\n token_endpoint: format!(\"{}/token\", auth_base),\n registration_endpoint: format!(\"{}/register\", auth_base),\n issuer: None,\n jwks_uri: None,\n scopes_supported: None,\n additional_fields: HashMap::new(),\n })\n }\n }\n\n /// get client id and credentials\n pub async fn get_credentials(&self) -> Result {\n let credentials = self.credentials.read().await;\n let client_id = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?\n .client_id();\n Ok((client_id.to_string(), credentials.clone()))\n }\n\n /// configure oauth2 client with client credentials\n pub fn configure_client(&mut self, config: OAuthClientConfig) -> Result<(), AuthError> {\n if self.metadata.is_none() {\n return Err(AuthError::NoAuthorizationSupport);\n }\n\n let metadata = self.metadata.as_ref().unwrap();\n\n let auth_url = AuthUrl::new(metadata.authorization_endpoint.clone())\n .map_err(|e| AuthError::OAuthError(format!(\"Invalid authorization URL: {}\", e)))?;\n\n let token_url = TokenUrl::new(metadata.token_endpoint.clone())\n .map_err(|e| AuthError::OAuthError(format!(\"Invalid token URL: {}\", e)))?;\n\n // debug!(\"token url: {:?}\", token_url);\n let client_id = ClientId::new(config.client_id);\n let redirect_url = RedirectUrl::new(config.redirect_uri.clone())\n .map_err(|e| AuthError::OAuthError(format!(\"Invalid re URL: {}\", e)))?;\n\n debug!(\"client_id: {:?}\", client_id);\n let mut client_builder = BasicClient::new(client_id.clone())\n .set_auth_uri(auth_url)\n .set_token_uri(token_url)\n .set_redirect_uri(redirect_url);\n\n if let Some(secret) = config.client_secret {\n client_builder = 
client_builder.set_client_secret(ClientSecret::new(secret));\n }\n\n self.oauth_client = Some(client_builder);\n Ok(())\n }\n\n /// dynamic register oauth2 client\n pub async fn register_client(\n &mut self,\n name: &str,\n redirect_uri: &str,\n ) -> Result {\n if self.metadata.is_none() {\n error!(\"No authorization support detected\");\n return Err(AuthError::NoAuthorizationSupport);\n }\n\n let metadata = self.metadata.as_ref().unwrap();\n let registration_url = metadata.registration_endpoint.clone();\n\n debug!(\"registration url: {:?}\", registration_url);\n // prepare registration request\n let registration_request = ClientRegistrationRequest {\n client_name: name.to_string(),\n redirect_uris: vec![redirect_uri.to_string()],\n grant_types: vec![\n \"authorization_code\".to_string(),\n \"refresh_token\".to_string(),\n ],\n token_endpoint_auth_method: \"none\".to_string(), // public client\n response_types: vec![\"code\".to_string()],\n };\n\n debug!(\"registration request: {:?}\", registration_request);\n\n let response = match self\n .http_client\n .post(registration_url)\n .json(®istration_request)\n .send()\n .await\n {\n Ok(response) => response,\n Err(e) => {\n error!(\"Registration request failed: {}\", e);\n return Err(AuthError::RegistrationFailed(format!(\n \"HTTP request error: {}\",\n e\n )));\n }\n };\n\n if !response.status().is_success() {\n let status = response.status();\n let error_text = match response.text().await {\n Ok(text) => text,\n Err(_) => \"cannot get error details\".to_string(),\n };\n\n error!(\"Registration failed: HTTP {} - {}\", status, error_text);\n return Err(AuthError::RegistrationFailed(format!(\n \"HTTP {}: {}\",\n status, error_text\n )));\n }\n\n debug!(\"registration response: {:?}\", response);\n let reg_response = match response.json::().await {\n Ok(response) => response,\n Err(e) => {\n error!(\"Failed to parse registration response: {}\", e);\n return Err(AuthError::RegistrationFailed(format!(\n \"analyze response 
error: {}\",\n e\n )));\n }\n };\n\n let config = OAuthClientConfig {\n client_id: reg_response.client_id,\n client_secret: reg_response.client_secret,\n redirect_uri: redirect_uri.to_string(),\n scopes: vec![],\n };\n\n self.configure_client(config.clone())?;\n Ok(config)\n }\n\n /// use provided client id to configure oauth2 client instead of dynamic registration\n /// this is useful when you have a stored client id from previous registration\n pub fn configure_client_id(&mut self, client_id: &str) -> Result<(), AuthError> {\n let config = OAuthClientConfig {\n client_id: client_id.to_string(),\n client_secret: None,\n scopes: vec![],\n redirect_uri: self.base_url.to_string(),\n };\n self.configure_client(config)\n }\n\n /// generate authorization url\n pub async fn get_authorization_url(&self, scopes: &[&str]) -> Result {\n let oauth_client = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?;\n\n // generate pkce challenge\n let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();\n\n // build authorization request\n let mut auth_request = oauth_client\n .authorize_url(CsrfToken::new_random)\n .set_pkce_challenge(pkce_challenge);\n\n // add request scopes\n for scope in scopes {\n auth_request = auth_request.add_scope(Scope::new(scope.to_string()));\n }\n\n let (auth_url, _csrf_token) = auth_request.url();\n\n // store pkce verifier for later use\n *self.pkce_verifier.write().await = Some(pkce_verifier);\n debug!(\"set pkce verifier: {:?}\", self.pkce_verifier.read().await);\n\n Ok(auth_url.to_string())\n }\n\n /// exchange authorization code for access token\n pub async fn exchange_code_for_token(\n &self,\n code: &str,\n ) -> Result, AuthError> {\n debug!(\"start exchange code for token: {:?}\", code);\n let oauth_client = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?;\n\n let pkce_verifier = 
self\n .pkce_verifier\n .write()\n .await\n .take()\n .ok_or_else(|| AuthError::InternalError(\"PKCE verifier not found\".to_string()))?;\n\n let http_client = reqwest::ClientBuilder::new()\n .redirect(reqwest::redirect::Policy::none())\n .build()\n .map_err(|e| AuthError::InternalError(e.to_string()))?;\n debug!(\"client_id: {:?}\", oauth_client.client_id());\n\n // exchange token\n let token_result = oauth_client\n .exchange_code(AuthorizationCode::new(code.to_string()))\n .set_pkce_verifier(pkce_verifier)\n .request_async(&http_client)\n .await\n .map_err(|e| AuthError::TokenExchangeFailed(e.to_string()))?;\n\n // get expires_in from token response\n let expires_in = token_result.expires_in();\n if let Some(expires_in) = expires_in {\n let expires_at = Instant::now() + expires_in;\n *self.expires_at.write().await = Some(expires_at);\n }\n debug!(\"exchange token result: {:?}\", token_result);\n // store credentials\n *self.credentials.write().await = Some(token_result.clone());\n\n Ok(token_result)\n }\n\n /// get access token, if expired, refresh it automatically\n pub async fn get_access_token(&self) -> Result {\n let credentials = self.credentials.read().await;\n\n if let Some(creds) = credentials.as_ref() {\n // check if the token is expire\n if let Some(expires_at) = *self.expires_at.read().await {\n if expires_at < Instant::now() {\n // token expired, try to refresh , release the lock\n drop(credentials);\n let new_creds = self.refresh_token().await?;\n return Ok(new_creds.access_token().secret().to_string());\n }\n }\n\n Ok(creds.access_token().secret().to_string())\n } else {\n Err(AuthError::AuthorizationRequired)\n }\n }\n\n /// refresh access token\n pub async fn refresh_token(\n &self,\n ) -> Result, AuthError> {\n let oauth_client = self\n .oauth_client\n .as_ref()\n .ok_or_else(|| AuthError::InternalError(\"OAuth client not configured\".to_string()))?;\n\n let current_credentials = self\n .credentials\n .read()\n .await\n .clone()\n .ok_or_else(|| 
AuthError::AuthorizationRequired)?;\n\n let refresh_token = current_credentials.refresh_token().ok_or_else(|| {\n AuthError::TokenRefreshFailed(\"No refresh token available\".to_string())\n })?;\n debug!(\"refresh token: {:?}\", refresh_token);\n // refresh token\n let token_result = oauth_client\n .exchange_refresh_token(&RefreshToken::new(refresh_token.secret().to_string()))\n .request_async(&self.http_client)\n .await\n .map_err(|e| AuthError::TokenRefreshFailed(e.to_string()))?;\n\n // store new credentials\n *self.credentials.write().await = Some(token_result.clone());\n\n // get expires_in from token response\n let expires_in = token_result.expires_in();\n if let Some(expires_in) = expires_in {\n let expires_at = Instant::now() + expires_in;\n *self.expires_at.write().await = Some(expires_at);\n }\n Ok(token_result)\n }\n\n /// prepare request, add authorization header\n pub async fn prepare_request(\n &self,\n request: reqwest::RequestBuilder,\n ) -> Result {\n let token = self.get_access_token().await?;\n Ok(request.header(AUTHORIZATION, format!(\"Bearer {}\", token)))\n }\n\n /// handle response, check if need to re-authorize\n pub async fn handle_response(\n &self,\n response: reqwest::Response,\n ) -> Result {\n if response.status() == StatusCode::UNAUTHORIZED {\n // 401 Unauthorized, need to re-authorize\n Err(AuthError::AuthorizationRequired)\n } else {\n Ok(response)\n }\n }\n}\n\n/// oauth2 authorization session, for guiding user to complete the authorization process\npub struct AuthorizationSession {\n pub auth_manager: AuthorizationManager,\n pub auth_url: String,\n pub redirect_uri: String,\n}\n\nimpl AuthorizationSession {\n /// create new authorization session\n pub async fn new(\n mut auth_manager: AuthorizationManager,\n scopes: &[&str],\n redirect_uri: &str,\n ) -> Result {\n // set redirect uri\n let config = OAuthClientConfig {\n client_id: \"mcp-client\".to_string(), // temporary id, will be updated by dynamic registration\n client_secret: 
None,\n scopes: scopes.iter().map(|s| s.to_string()).collect(),\n redirect_uri: redirect_uri.to_string(),\n };\n\n // try to dynamic register client\n let config = match auth_manager\n .register_client(\"MCP Client\", redirect_uri)\n .await\n {\n Ok(config) => config,\n Err(e) => {\n eprintln!(\"Dynamic registration failed: {}\", e);\n // fallback to default config\n config\n }\n };\n // reset client config\n auth_manager.configure_client(config)?;\n let auth_url = auth_manager.get_authorization_url(scopes).await?;\n\n Ok(Self {\n auth_manager,\n auth_url,\n redirect_uri: redirect_uri.to_string(),\n })\n }\n\n /// get client_id and credentials\n pub async fn get_credentials(&self) -> Result {\n self.auth_manager.get_credentials().await\n }\n\n /// get authorization url\n pub fn get_authorization_url(&self) -> &str {\n &self.auth_url\n }\n\n /// handle authorization code callback\n pub async fn handle_callback(\n &self,\n code: &str,\n ) -> Result, AuthError> {\n self.auth_manager.exchange_code_for_token(code).await\n }\n}\n\n/// http client extension, automatically add authorization header\npub struct AuthorizedHttpClient {\n auth_manager: Arc,\n inner_client: HttpClient,\n}\n\nimpl AuthorizedHttpClient {\n /// create new authorized http client\n pub fn new(auth_manager: Arc, client: Option) -> Self {\n let inner_client = client.unwrap_or_default();\n Self {\n auth_manager,\n inner_client,\n }\n }\n\n /// send authorized request\n pub async fn request(\n &self,\n method: reqwest::Method,\n url: U,\n ) -> Result {\n let request = self.inner_client.request(method, url);\n self.auth_manager.prepare_request(request).await\n }\n\n /// send get request\n pub async fn get(&self, url: U) -> Result {\n let request = self.request(reqwest::Method::GET, url).await?;\n let response = request.send().await?;\n self.auth_manager.handle_response(response).await\n }\n\n /// send post request\n pub async fn post(&self, url: U) -> Result {\n self.request(reqwest::Method::POST, 
url).await\n }\n}\n\n/// OAuth state machine\n/// Use the OAuthState to manage the OAuth client is more recommend\n/// But also you can use the AuthorizationManager,AuthorizationSession,AuthorizedHttpClient directly\npub enum OAuthState {\n /// the AuthorizationManager\n Unauthorized(AuthorizationManager),\n /// the AuthorizationSession\n Session(AuthorizationSession),\n /// the authd AuthorizationManager\n Authorized(AuthorizationManager),\n /// the authd http client\n AuthorizedHttpClient(AuthorizedHttpClient),\n}\n\nimpl OAuthState {\n /// Create new OAuth state machine\n pub async fn new(\n base_url: U,\n client: Option,\n ) -> Result {\n let mut manager = AuthorizationManager::new(base_url).await?;\n if let Some(client) = client {\n manager.with_client(client)?;\n }\n\n Ok(OAuthState::Unauthorized(manager))\n }\n\n /// Get client_id and OAuth credentials\n pub async fn get_credentials(&self) -> Result {\n // return client_id and credentials\n match self {\n OAuthState::Unauthorized(manager) | OAuthState::Authorized(manager) => {\n manager.get_credentials().await\n }\n OAuthState::Session(session) => session.get_credentials().await,\n OAuthState::AuthorizedHttpClient(client) => client.auth_manager.get_credentials().await,\n }\n }\n\n /// Manually set credentials and move into authorized state\n /// Useful if you're caching credentials externally and wish to reuse them\n pub async fn set_credentials(\n &mut self,\n client_id: &str,\n credentials: OAuthTokenResponse,\n ) -> Result<(), AuthError> {\n if let OAuthState::Unauthorized(manager) = self {\n let mut manager = std::mem::replace(\n manager,\n AuthorizationManager::new(\"http://localhost\").await?,\n );\n\n // write credentials\n *manager.credentials.write().await = Some(credentials);\n\n // discover metadata\n let metadata = manager.discover_metadata().await?;\n manager.metadata = Some(metadata);\n\n // set client id and secret\n manager.configure_client_id(client_id)?;\n\n *self = 
OAuthState::Authorized(manager);\n Ok(())\n } else {\n Err(AuthError::InternalError(\n \"Cannot set credentials in this state\".to_string(),\n ))\n }\n }\n\n /// start authorization\n pub async fn start_authorization(\n &mut self,\n scopes: &[&str],\n redirect_uri: &str,\n ) -> Result<(), AuthError> {\n if let OAuthState::Unauthorized(mut manager) = std::mem::replace(\n self,\n OAuthState::Unauthorized(AuthorizationManager::new(\"http://localhost\").await?),\n ) {\n debug!(\"start discovery\");\n let metadata = manager.discover_metadata().await?;\n manager.metadata = Some(metadata);\n debug!(\"start session\");\n let session = AuthorizationSession::new(manager, scopes, redirect_uri).await?;\n *self = OAuthState::Session(session);\n Ok(())\n } else {\n Err(AuthError::InternalError(\n \"Already in session state\".to_string(),\n ))\n }\n }\n\n /// complete authorization\n pub async fn complete_authorization(&mut self) -> Result<(), AuthError> {\n if let OAuthState::Session(session) = std::mem::replace(\n self,\n OAuthState::Unauthorized(AuthorizationManager::new(\"http://localhost\").await?),\n ) {\n *self = OAuthState::Authorized(session.auth_manager);\n Ok(())\n } else {\n Err(AuthError::InternalError(\"Not in session state\".to_string()))\n }\n }\n /// covert to authorized http client\n pub async fn to_authorized_http_client(&mut self) -> Result<(), AuthError> {\n if let OAuthState::Authorized(manager) = std::mem::replace(\n self,\n OAuthState::Authorized(AuthorizationManager::new(\"http://localhost\").await?),\n ) {\n *self = OAuthState::AuthorizedHttpClient(AuthorizedHttpClient::new(\n Arc::new(manager),\n None,\n ));\n Ok(())\n } else {\n Err(AuthError::InternalError(\n \"Not in authorized state\".to_string(),\n ))\n }\n }\n /// get current authorization url\n pub async fn get_authorization_url(&self) -> Result {\n match self {\n OAuthState::Session(session) => Ok(session.get_authorization_url().to_string()),\n OAuthState::Unauthorized(_) => {\n 
Err(AuthError::InternalError(\"Not in session state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n /// handle authorization callback\n pub async fn handle_callback(&mut self, code: &str) -> Result<(), AuthError> {\n match self {\n OAuthState::Session(session) => {\n session.handle_callback(code).await?;\n self.complete_authorization().await\n }\n OAuthState::Unauthorized(_) => {\n Err(AuthError::InternalError(\"Not in session state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n /// get access token\n pub async fn get_access_token(&self) -> Result {\n match self {\n OAuthState::Unauthorized(manager) => manager.get_access_token().await,\n OAuthState::Session(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Authorized(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n /// refresh access token\n pub async fn refresh_token(&self) -> Result<(), AuthError> {\n match self {\n OAuthState::Unauthorized(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Session(_) => {\n Err(AuthError::InternalError(\"Not in manager state\".to_string()))\n }\n OAuthState::Authorized(manager) => {\n manager.refresh_token().await?;\n Ok(())\n }\n OAuthState::AuthorizedHttpClient(_) => {\n Err(AuthError::InternalError(\"Already authorized\".to_string()))\n }\n }\n }\n\n pub fn into_authorization_manager(self) -> Option {\n match self {\n 
OAuthState::Authorized(manager) => Some(manager),\n _ => None,\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/server_side_http.rs", "#![allow(dead_code)]\nuse std::{convert::Infallible, fmt::Display, sync::Arc, time::Duration};\n\nuse bytes::{Buf, Bytes};\nuse http::Response;\nuse http_body::Body;\nuse http_body_util::{BodyExt, Empty, Full, combinators::BoxBody};\nuse sse_stream::{KeepAlive, Sse, SseBody};\n\nuse super::http_header::EVENT_STREAM_MIME_TYPE;\nuse crate::model::{ClientJsonRpcMessage, ServerJsonRpcMessage};\n\npub type SessionId = Arc;\n\npub fn session_id() -> SessionId {\n uuid::Uuid::new_v4().to_string().into()\n}\n\npub const DEFAULT_AUTO_PING_INTERVAL: Duration = Duration::from_secs(15);\n\npub(crate) type BoxResponse = Response>;\n\npub(crate) fn accepted_response() -> Response> {\n Response::builder()\n .status(http::StatusCode::ACCEPTED)\n .body(Empty::new().boxed())\n .expect(\"valid response\")\n}\npin_project_lite::pin_project! {\n struct TokioTimer {\n #[pin]\n sleep: tokio::time::Sleep,\n }\n}\nimpl Future for TokioTimer {\n type Output = ();\n\n fn poll(\n self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll {\n let this = self.project();\n this.sleep.poll(cx)\n }\n}\nimpl sse_stream::Timer for TokioTimer {\n fn from_duration(duration: Duration) -> Self {\n Self {\n sleep: tokio::time::sleep(duration),\n }\n }\n\n fn reset(self: std::pin::Pin<&mut Self>, when: std::time::Instant) {\n let this = self.project();\n this.sleep.reset(tokio::time::Instant::from_std(when));\n }\n}\n\n#[derive(Debug, Clone)]\npub struct ServerSseMessage {\n pub event_id: Option,\n pub message: Arc,\n}\n\npub(crate) fn sse_stream_response(\n stream: impl futures::Stream + Send + Sync + 'static,\n keep_alive: Option,\n) -> Response> {\n use futures::StreamExt;\n let stream = SseBody::new(stream.map(|message| {\n let data = serde_json::to_string(&message.message).expect(\"valid message\");\n let mut sse = 
Sse::default().data(data);\n sse.id = message.event_id;\n Result::::Ok(sse)\n }));\n let stream = match keep_alive {\n Some(duration) => stream\n .with_keep_alive::(KeepAlive::new().interval(duration))\n .boxed(),\n None => stream.boxed(),\n };\n Response::builder()\n .status(http::StatusCode::OK)\n .header(http::header::CONTENT_TYPE, EVENT_STREAM_MIME_TYPE)\n .header(http::header::CACHE_CONTROL, \"no-cache\")\n .body(stream)\n .expect(\"valid response\")\n}\n\npub(crate) const fn internal_error_response(\n context: &str,\n) -> impl FnOnce(E) -> Response> {\n move |error| {\n tracing::error!(\"Internal server error when {context}: {error}\");\n Response::builder()\n .status(http::StatusCode::INTERNAL_SERVER_ERROR)\n .body(\n Full::new(Bytes::from(format!(\n \"Encounter an error when {context}: {error}\"\n )))\n .boxed(),\n )\n .expect(\"valid response\")\n }\n}\n\npub(crate) fn unexpected_message_response(expect: &str) -> Response> {\n Response::builder()\n .status(http::StatusCode::UNPROCESSABLE_ENTITY)\n .body(Full::new(Bytes::from(format!(\"Unexpected message, expect {expect}\"))).boxed())\n .expect(\"valid response\")\n}\n\npub(crate) async fn expect_json(\n body: B,\n) -> Result>>\nwhere\n B: Body + Send + 'static,\n B::Error: Display,\n{\n match body.collect().await {\n Ok(bytes) => {\n match serde_json::from_reader::<_, ClientJsonRpcMessage>(bytes.aggregate().reader()) {\n Ok(message) => Ok(message),\n Err(e) => {\n let response = Response::builder()\n .status(http::StatusCode::UNSUPPORTED_MEDIA_TYPE)\n .body(\n Full::new(Bytes::from(format!(\"fail to deserialize request body {e}\")))\n .boxed(),\n )\n .expect(\"valid response\");\n Err(response)\n }\n }\n }\n Err(e) => {\n let response = Response::builder()\n .status(http::StatusCode::INTERNAL_SERVER_ERROR)\n .body(Full::new(Bytes::from(format!(\"Failed to read request body: {e}\"))).boxed())\n .expect(\"valid response\");\n Err(response)\n }\n }\n}\n"], 
["/rust-sdk/crates/rmcp/src/transport/common/auth/sse_client.rs", "use http::Uri;\n\nuse crate::transport::{\n auth::AuthClient,\n sse_client::{SseClient, SseTransportError},\n};\nimpl SseClient for AuthClient\nwhere\n C: SseClient,\n{\n type Error = SseTransportError;\n\n async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n mut auth_token: Option,\n ) -> Result<(), SseTransportError> {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .post_message(uri, message, auth_token)\n .await\n .map_err(SseTransportError::Client)\n }\n\n async fn get_stream(\n &self,\n uri: Uri,\n last_event_id: Option,\n mut auth_token: Option,\n ) -> Result<\n crate::transport::common::client_side_sse::BoxedSseResponse,\n SseTransportError,\n > {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .get_stream(uri, last_event_id, auth_token)\n .await\n .map_err(SseTransportError::Client)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/sse_server.rs", "use std::{collections::HashMap, io, net::SocketAddr, sync::Arc, time::Duration};\n\nuse axum::{\n Extension, Json, Router,\n extract::{NestedPath, Query, State},\n http::{StatusCode, request::Parts},\n response::{\n Response,\n sse::{Event, KeepAlive, Sse},\n },\n routing::{get, post},\n};\nuse futures::{Sink, SinkExt, Stream};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tokio_util::sync::{CancellationToken, PollSender};\nuse tracing::Instrument;\n\nuse crate::{\n RoleServer, Service,\n model::ClientJsonRpcMessage,\n service::{RxJsonRpcMessage, TxJsonRpcMessage, serve_directly_with_ct},\n transport::common::server_side_http::{DEFAULT_AUTO_PING_INTERVAL, SessionId, session_id},\n};\n\ntype TxStore =\n Arc>>>;\npub type TransportReceiver = ReceiverStream>;\n\n#[derive(Clone)]\nstruct App {\n txs: TxStore,\n transport_tx: tokio::sync::mpsc::UnboundedSender,\n post_path: Arc,\n 
sse_ping_interval: Duration,\n}\n\nimpl App {\n pub fn new(\n post_path: String,\n sse_ping_interval: Duration,\n ) -> (\n Self,\n tokio::sync::mpsc::UnboundedReceiver,\n ) {\n let (transport_tx, transport_rx) = tokio::sync::mpsc::unbounded_channel();\n (\n Self {\n txs: Default::default(),\n transport_tx,\n post_path: post_path.into(),\n sse_ping_interval,\n },\n transport_rx,\n )\n }\n}\n\n#[derive(Debug, serde::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct PostEventQuery {\n pub session_id: String,\n}\n\nasync fn post_event_handler(\n State(app): State,\n Query(PostEventQuery { session_id }): Query,\n parts: Parts,\n Json(mut message): Json,\n) -> Result {\n tracing::debug!(session_id, ?parts, ?message, \"new client message\");\n let tx = {\n let rg = app.txs.read().await;\n rg.get(session_id.as_str())\n .ok_or(StatusCode::NOT_FOUND)?\n .clone()\n };\n message.insert_extension(parts);\n if tx.send(message).await.is_err() {\n tracing::error!(\"send message error\");\n return Err(StatusCode::GONE);\n }\n Ok(StatusCode::ACCEPTED)\n}\n\nasync fn sse_handler(\n State(app): State,\n nested_path: Option>,\n parts: Parts,\n) -> Result>>, Response> {\n let session = session_id();\n tracing::info!(%session, ?parts, \"sse connection\");\n use tokio_stream::{StreamExt, wrappers::ReceiverStream};\n use tokio_util::sync::PollSender;\n let (from_client_tx, from_client_rx) = tokio::sync::mpsc::channel(64);\n let (to_client_tx, to_client_rx) = tokio::sync::mpsc::channel(64);\n let to_client_tx_clone = to_client_tx.clone();\n\n app.txs\n .write()\n .await\n .insert(session.clone(), from_client_tx);\n let session = session.clone();\n let stream = ReceiverStream::new(from_client_rx);\n let sink = PollSender::new(to_client_tx);\n let transport = SseServerTransport {\n stream,\n sink,\n session_id: session.clone(),\n tx_store: app.txs.clone(),\n };\n let transport_send_result = app.transport_tx.send(transport);\n if transport_send_result.is_err() {\n 
tracing::warn!(\"send transport out error\");\n let mut response =\n Response::new(\"fail to send out transport, it seems server is closed\".to_string());\n *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;\n return Err(response);\n }\n let nested_path = nested_path.as_deref().map(NestedPath::as_str).unwrap_or(\"\");\n let post_path = app.post_path.as_ref();\n let ping_interval = app.sse_ping_interval;\n let stream = futures::stream::once(futures::future::ok(\n Event::default()\n .event(\"endpoint\")\n .data(format!(\"{nested_path}{post_path}?sessionId={session}\")),\n ))\n .chain(ReceiverStream::new(to_client_rx).map(|message| {\n match serde_json::to_string(&message) {\n Ok(bytes) => Ok(Event::default().event(\"message\").data(&bytes)),\n Err(e) => Err(io::Error::new(io::ErrorKind::InvalidData, e)),\n }\n }));\n\n tokio::spawn(async move {\n // Wait for connection closure\n to_client_tx_clone.closed().await;\n\n // Clean up session\n let session_id = session.clone();\n let tx_store = app.txs.clone();\n let mut txs = tx_store.write().await;\n txs.remove(&session_id);\n tracing::debug!(%session_id, \"Closed session and cleaned up resources\");\n });\n\n Ok(Sse::new(stream).keep_alive(KeepAlive::new().interval(ping_interval)))\n}\n\npub struct SseServerTransport {\n stream: ReceiverStream>,\n sink: PollSender>,\n session_id: SessionId,\n tx_store: TxStore,\n}\n\nimpl Sink> for SseServerTransport {\n type Error = io::Error;\n\n fn poll_ready(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.sink\n .poll_ready_unpin(cx)\n .map_err(std::io::Error::other)\n }\n\n fn start_send(\n mut self: std::pin::Pin<&mut Self>,\n item: TxJsonRpcMessage,\n ) -> Result<(), Self::Error> {\n self.sink\n .start_send_unpin(item)\n .map_err(std::io::Error::other)\n }\n\n fn poll_flush(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.sink\n .poll_flush_unpin(cx)\n 
.map_err(std::io::Error::other)\n }\n\n fn poll_close(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n let inner_close_result = self\n .sink\n .poll_close_unpin(cx)\n .map_err(std::io::Error::other);\n if inner_close_result.is_ready() {\n let session_id = self.session_id.clone();\n let tx_store = self.tx_store.clone();\n tokio::spawn(async move {\n tx_store.write().await.remove(&session_id);\n });\n }\n inner_close_result\n }\n}\n\nimpl Stream for SseServerTransport {\n type Item = RxJsonRpcMessage;\n\n fn poll_next(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n use futures::StreamExt;\n self.stream.poll_next_unpin(cx)\n }\n}\n\n#[derive(Debug, Clone)]\npub struct SseServerConfig {\n pub bind: SocketAddr,\n pub sse_path: String,\n pub post_path: String,\n pub ct: CancellationToken,\n pub sse_keep_alive: Option,\n}\n\n#[derive(Debug)]\npub struct SseServer {\n transport_rx: tokio::sync::mpsc::UnboundedReceiver,\n pub config: SseServerConfig,\n}\n\nimpl SseServer {\n pub async fn serve(bind: SocketAddr) -> io::Result {\n Self::serve_with_config(SseServerConfig {\n bind,\n sse_path: \"/sse\".to_string(),\n post_path: \"/message\".to_string(),\n ct: CancellationToken::new(),\n sse_keep_alive: None,\n })\n .await\n }\n pub async fn serve_with_config(config: SseServerConfig) -> io::Result {\n let (sse_server, service) = Self::new(config);\n let listener = tokio::net::TcpListener::bind(sse_server.config.bind).await?;\n let ct = sse_server.config.ct.child_token();\n let server = axum::serve(listener, service).with_graceful_shutdown(async move {\n ct.cancelled().await;\n tracing::info!(\"sse server cancelled\");\n });\n tokio::spawn(\n async move {\n if let Err(e) = server.await {\n tracing::error!(error = %e, \"sse server shutdown with error\");\n }\n }\n .instrument(tracing::info_span!(\"sse-server\", bind_address = %sse_server.config.bind)),\n );\n Ok(sse_server)\n 
}\n\n pub fn new(config: SseServerConfig) -> (SseServer, Router) {\n let (app, transport_rx) = App::new(\n config.post_path.clone(),\n config.sse_keep_alive.unwrap_or(DEFAULT_AUTO_PING_INTERVAL),\n );\n let router = Router::new()\n .route(&config.sse_path, get(sse_handler))\n .route(&config.post_path, post(post_event_handler))\n .with_state(app);\n\n let server = SseServer {\n transport_rx,\n config,\n };\n\n (server, router)\n }\n\n pub fn with_service(mut self, service_provider: F) -> CancellationToken\n where\n S: Service,\n F: Fn() -> S + Send + 'static,\n {\n use crate::service::ServiceExt;\n let ct = self.config.ct.clone();\n tokio::spawn(async move {\n while let Some(transport) = self.next_transport().await {\n let service = service_provider();\n let ct = self.config.ct.child_token();\n tokio::spawn(async move {\n let server = service\n .serve_with_ct(transport, ct)\n .await\n .map_err(std::io::Error::other)?;\n server.waiting().await?;\n tokio::io::Result::Ok(())\n });\n }\n });\n ct\n }\n\n /// This allows you to skip the initialization steps for incoming request.\n pub fn with_service_directly(mut self, service_provider: F) -> CancellationToken\n where\n S: Service,\n F: Fn() -> S + Send + 'static,\n {\n let ct = self.config.ct.clone();\n tokio::spawn(async move {\n while let Some(transport) = self.next_transport().await {\n let service = service_provider();\n let ct = self.config.ct.child_token();\n tokio::spawn(async move {\n let server = serve_directly_with_ct(service, transport, None, ct);\n server.waiting().await?;\n tokio::io::Result::Ok(())\n });\n }\n });\n ct\n }\n\n pub fn cancel(&self) {\n self.config.ct.cancel();\n }\n\n pub async fn next_transport(&mut self) -> Option {\n self.transport_rx.recv().await\n }\n}\n\nimpl Stream for SseServer {\n type Item = SseServerTransport;\n\n fn poll_next(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.transport_rx.poll_recv(cx)\n }\n}\n"], 
["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/session/local.rs", "use std::{\n collections::{HashMap, HashSet, VecDeque},\n num::ParseIntError,\n sync::Arc,\n time::Duration,\n};\n\nuse futures::Stream;\nuse thiserror::Error;\nuse tokio::sync::{\n mpsc::{Receiver, Sender},\n oneshot,\n};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tracing::instrument;\n\nuse crate::{\n RoleServer,\n model::{\n CancelledNotificationParam, ClientJsonRpcMessage, ClientNotification, ClientRequest,\n JsonRpcNotification, JsonRpcRequest, Notification, ProgressNotificationParam,\n ProgressToken, RequestId, ServerJsonRpcMessage, ServerNotification,\n },\n transport::{\n WorkerTransport,\n common::server_side_http::{SessionId, session_id},\n worker::{Worker, WorkerContext, WorkerQuitReason, WorkerSendRequest},\n },\n};\n\n#[derive(Debug, Default)]\npub struct LocalSessionManager {\n pub sessions: tokio::sync::RwLock>,\n pub session_config: SessionConfig,\n}\n\n#[derive(Debug, Error)]\npub enum LocalSessionManagerError {\n #[error(\"Session not found: {0}\")]\n SessionNotFound(SessionId),\n #[error(\"Session error: {0}\")]\n SessionError(#[from] SessionError),\n #[error(\"Invalid event id: {0}\")]\n InvalidEventId(#[from] EventIdParseError),\n}\nimpl SessionManager for LocalSessionManager {\n type Error = LocalSessionManagerError;\n type Transport = WorkerTransport;\n async fn create_session(&self) -> Result<(SessionId, Self::Transport), Self::Error> {\n let id = session_id();\n let (handle, worker) = create_local_session(id.clone(), self.session_config.clone());\n self.sessions.write().await.insert(id.clone(), handle);\n Ok((id, WorkerTransport::spawn(worker)))\n }\n async fn initialize_session(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> Result {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let response = handle.initialize(message).await?;\n 
Ok(response)\n }\n async fn close_session(&self, id: &SessionId) -> Result<(), Self::Error> {\n let mut sessions = self.sessions.write().await;\n if let Some(handle) = sessions.remove(id) {\n handle.close().await?;\n }\n Ok(())\n }\n async fn has_session(&self, id: &SessionId) -> Result {\n let sessions = self.sessions.read().await;\n Ok(sessions.contains_key(id))\n }\n async fn create_stream(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> Result + Send + 'static, Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let receiver = handle.establish_request_wise_channel().await?;\n handle\n .push_message(message, receiver.http_request_id)\n .await?;\n Ok(ReceiverStream::new(receiver.inner))\n }\n\n async fn create_standalone_stream(\n &self,\n id: &SessionId,\n ) -> Result + Send + 'static, Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let receiver = handle.establish_common_channel().await?;\n Ok(ReceiverStream::new(receiver.inner))\n }\n\n async fn resume(\n &self,\n id: &SessionId,\n last_event_id: String,\n ) -> Result + Send + 'static, Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n let receiver = handle.resume(last_event_id.parse()?).await?;\n Ok(ReceiverStream::new(receiver.inner))\n }\n\n async fn accept_message(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> Result<(), Self::Error> {\n let sessions = self.sessions.read().await;\n let handle = sessions\n .get(id)\n .ok_or(LocalSessionManagerError::SessionNotFound(id.clone()))?;\n handle.push_message(message, None).await?;\n Ok(())\n }\n}\n\n/// `/request_id>`\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct EventId {\n 
http_request_id: Option,\n index: usize,\n}\n\nimpl std::fmt::Display for EventId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.index)?;\n match &self.http_request_id {\n Some(http_request_id) => write!(f, \"/{http_request_id}\"),\n None => write!(f, \"\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Error)]\npub enum EventIdParseError {\n #[error(\"Invalid index: {0}\")]\n InvalidIndex(ParseIntError),\n #[error(\"Invalid numeric request id: {0}\")]\n InvalidNumericRequestId(ParseIntError),\n #[error(\"Missing request id type\")]\n InvalidRequestIdType,\n #[error(\"Missing request id\")]\n MissingRequestId,\n}\n\nimpl std::str::FromStr for EventId {\n type Err = EventIdParseError;\n fn from_str(s: &str) -> Result {\n if let Some((index, request_id)) = s.split_once(\"/\") {\n let index = usize::from_str(index).map_err(EventIdParseError::InvalidIndex)?;\n let request_id = u64::from_str(request_id).map_err(EventIdParseError::InvalidIndex)?;\n Ok(EventId {\n http_request_id: Some(request_id),\n index,\n })\n } else {\n let index = usize::from_str(s).map_err(EventIdParseError::InvalidIndex)?;\n Ok(EventId {\n http_request_id: None,\n index,\n })\n }\n }\n}\n\nuse super::{ServerSseMessage, SessionManager};\n\nstruct CachedTx {\n tx: Sender,\n cache: VecDeque,\n http_request_id: Option,\n capacity: usize,\n}\n\nimpl CachedTx {\n fn new(tx: Sender, http_request_id: Option) -> Self {\n Self {\n cache: VecDeque::with_capacity(tx.capacity()),\n capacity: tx.capacity(),\n tx,\n http_request_id,\n }\n }\n fn new_common(tx: Sender) -> Self {\n Self::new(tx, None)\n }\n\n async fn send(&mut self, message: ServerJsonRpcMessage) {\n let index = self.cache.back().map_or(0, |m| {\n m.event_id\n .as_deref()\n .unwrap_or_default()\n .parse::()\n .expect(\"valid event id\")\n .index\n + 1\n });\n let event_id = EventId {\n http_request_id: self.http_request_id,\n index,\n };\n let message = ServerSseMessage {\n event_id: 
Some(event_id.to_string()),\n message: Arc::new(message),\n };\n if self.cache.len() >= self.capacity {\n self.cache.pop_front();\n self.cache.push_back(message.clone());\n } else {\n self.cache.push_back(message.clone());\n }\n let _ = self.tx.send(message).await.inspect_err(|e| {\n let event_id = &e.0.event_id;\n tracing::trace!(?event_id, \"trying to send message in a closed session\")\n });\n }\n\n async fn sync(&mut self, index: usize) -> Result<(), SessionError> {\n let Some(front) = self.cache.front() else {\n return Ok(());\n };\n let front_event_id = front\n .event_id\n .as_deref()\n .unwrap_or_default()\n .parse::()?;\n let sync_index = index.saturating_sub(front_event_id.index);\n if sync_index > self.cache.len() {\n // invalid index\n return Err(SessionError::InvalidEventId);\n }\n for message in self.cache.iter().skip(sync_index) {\n let send_result = self.tx.send(message.clone()).await;\n if send_result.is_err() {\n let event_id: EventId = message.event_id.as_deref().unwrap_or_default().parse()?;\n return Err(SessionError::ChannelClosed(Some(event_id.index as u64)));\n }\n }\n Ok(())\n }\n}\n\nstruct HttpRequestWise {\n resources: HashSet,\n tx: CachedTx,\n}\n\ntype HttpRequestId = u64;\n#[derive(Debug, Clone, Hash, PartialEq, Eq)]\nenum ResourceKey {\n McpRequestId(RequestId),\n ProgressToken(ProgressToken),\n}\n\npub struct LocalSessionWorker {\n id: SessionId,\n next_http_request_id: HttpRequestId,\n tx_router: HashMap,\n resource_router: HashMap,\n common: CachedTx,\n event_rx: Receiver,\n session_config: SessionConfig,\n}\n\nimpl LocalSessionWorker {\n pub fn id(&self) -> &SessionId {\n &self.id\n }\n}\n\n#[derive(Debug, Error)]\npub enum SessionError {\n #[error(\"Invalid request id: {0}\")]\n DuplicatedRequestId(HttpRequestId),\n #[error(\"Channel closed: {0:?}\")]\n ChannelClosed(Option),\n #[error(\"Cannot parse event id: {0}\")]\n EventIdParseError(#[from] EventIdParseError),\n #[error(\"Session service terminated\")]\n 
SessionServiceTerminated,\n #[error(\"Invalid event id\")]\n InvalidEventId,\n #[error(\"Transport closed\")]\n TransportClosed,\n #[error(\"IO error: {0}\")]\n Io(#[from] std::io::Error),\n #[error(\"Tokio join error {0}\")]\n TokioJoinError(#[from] tokio::task::JoinError),\n}\n\nimpl From for std::io::Error {\n fn from(value: SessionError) -> Self {\n match value {\n SessionError::Io(io) => io,\n _ => std::io::Error::new(std::io::ErrorKind::Other, format!(\"Session error: {value}\")),\n }\n }\n}\n\nenum OutboundChannel {\n RequestWise { id: HttpRequestId, close: bool },\n Common,\n}\n\npub struct StreamableHttpMessageReceiver {\n pub http_request_id: Option,\n pub inner: Receiver,\n}\n\nimpl LocalSessionWorker {\n fn unregister_resource(&mut self, resource: &ResourceKey) {\n if let Some(http_request_id) = self.resource_router.remove(resource) {\n tracing::trace!(?resource, http_request_id, \"unregister resource\");\n if let Some(channel) = self.tx_router.get_mut(&http_request_id) {\n // It's okey to do so, since we don't handle batch json rpc request anymore\n // and this can be refactored after the batch request is removed in the coming version.\n if channel.resources.is_empty() || matches!(resource, ResourceKey::McpRequestId(_))\n {\n tracing::debug!(http_request_id, \"close http request wise channel\");\n if let Some(channel) = self.tx_router.remove(&http_request_id) {\n for resource in channel.resources {\n self.resource_router.remove(&resource);\n }\n }\n }\n } else {\n tracing::warn!(http_request_id, \"http request wise channel not found\");\n }\n }\n }\n fn register_resource(&mut self, resource: ResourceKey, http_request_id: HttpRequestId) {\n tracing::trace!(?resource, http_request_id, \"register resource\");\n if let Some(channel) = self.tx_router.get_mut(&http_request_id) {\n channel.resources.insert(resource.clone());\n self.resource_router.insert(resource, http_request_id);\n }\n }\n fn register_request(\n &mut self,\n request: &JsonRpcRequest,\n 
http_request_id: HttpRequestId,\n ) {\n use crate::model::GetMeta;\n self.register_resource(\n ResourceKey::McpRequestId(request.id.clone()),\n http_request_id,\n );\n if let Some(progress_token) = request.request.get_meta().get_progress_token() {\n self.register_resource(\n ResourceKey::ProgressToken(progress_token.clone()),\n http_request_id,\n );\n }\n }\n fn catch_cancellation_notification(\n &mut self,\n notification: &JsonRpcNotification,\n ) {\n if let ClientNotification::CancelledNotification(n) = ¬ification.notification {\n let request_id = n.params.request_id.clone();\n let resource = ResourceKey::McpRequestId(request_id);\n self.unregister_resource(&resource);\n }\n }\n fn next_http_request_id(&mut self) -> HttpRequestId {\n let id = self.next_http_request_id;\n self.next_http_request_id = self.next_http_request_id.wrapping_add(1);\n id\n }\n async fn establish_request_wise_channel(\n &mut self,\n ) -> Result {\n let http_request_id = self.next_http_request_id();\n let (tx, rx) = tokio::sync::mpsc::channel(self.session_config.channel_capacity);\n self.tx_router.insert(\n http_request_id,\n HttpRequestWise {\n resources: Default::default(),\n tx: CachedTx::new(tx, Some(http_request_id)),\n },\n );\n tracing::debug!(http_request_id, \"establish new request wise channel\");\n Ok(StreamableHttpMessageReceiver {\n http_request_id: Some(http_request_id),\n inner: rx,\n })\n }\n fn resolve_outbound_channel(&self, message: &ServerJsonRpcMessage) -> OutboundChannel {\n match &message {\n ServerJsonRpcMessage::Request(_) => OutboundChannel::Common,\n ServerJsonRpcMessage::Notification(JsonRpcNotification {\n notification:\n ServerNotification::ProgressNotification(Notification {\n params: ProgressNotificationParam { progress_token, .. 
},\n ..\n }),\n ..\n }) => {\n let id = self\n .resource_router\n .get(&ResourceKey::ProgressToken(progress_token.clone()));\n\n if let Some(id) = id {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::Notification(JsonRpcNotification {\n notification:\n ServerNotification::CancelledNotification(Notification {\n params: CancelledNotificationParam { request_id, .. },\n ..\n }),\n ..\n }) => {\n if let Some(id) = self\n .resource_router\n .get(&ResourceKey::McpRequestId(request_id.clone()))\n {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::Notification(_) => OutboundChannel::Common,\n ServerJsonRpcMessage::Response(json_rpc_response) => {\n if let Some(id) = self\n .resource_router\n .get(&ResourceKey::McpRequestId(json_rpc_response.id.clone()))\n {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::Error(json_rpc_error) => {\n if let Some(id) = self\n .resource_router\n .get(&ResourceKey::McpRequestId(json_rpc_error.id.clone()))\n {\n OutboundChannel::RequestWise {\n id: *id,\n close: false,\n }\n } else {\n OutboundChannel::Common\n }\n }\n ServerJsonRpcMessage::BatchRequest(_) | ServerJsonRpcMessage::BatchResponse(_) => {\n // the server side should never yield a batch request or response now\n unreachable!(\"server side won't yield batch request or response\")\n }\n }\n }\n async fn handle_server_message(\n &mut self,\n message: ServerJsonRpcMessage,\n ) -> Result<(), SessionError> {\n let outbound_channel = self.resolve_outbound_channel(&message);\n match outbound_channel {\n OutboundChannel::RequestWise { id, close } => {\n if let Some(request_wise) = self.tx_router.get_mut(&id) {\n request_wise.tx.send(message).await;\n if close {\n self.tx_router.remove(&id);\n }\n } else {\n return 
Err(SessionError::ChannelClosed(Some(id)));\n }\n }\n OutboundChannel::Common => self.common.send(message).await,\n }\n Ok(())\n }\n async fn resume(\n &mut self,\n last_event_id: EventId,\n ) -> Result {\n match last_event_id.http_request_id {\n Some(http_request_id) => {\n let request_wise = self\n .tx_router\n .get_mut(&http_request_id)\n .ok_or(SessionError::ChannelClosed(Some(http_request_id)))?;\n let channel = tokio::sync::mpsc::channel(self.session_config.channel_capacity);\n let (tx, rx) = channel;\n request_wise.tx.tx = tx;\n let index = last_event_id.index;\n // sync messages after index\n request_wise.tx.sync(index).await?;\n Ok(StreamableHttpMessageReceiver {\n http_request_id: Some(http_request_id),\n inner: rx,\n })\n }\n None => {\n let channel = tokio::sync::mpsc::channel(self.session_config.channel_capacity);\n let (tx, rx) = channel;\n self.common.tx = tx;\n let index = last_event_id.index;\n // sync messages after index\n self.common.sync(index).await?;\n Ok(StreamableHttpMessageReceiver {\n http_request_id: None,\n inner: rx,\n })\n }\n }\n }\n}\n\nenum SessionEvent {\n ClientMessage {\n message: ClientJsonRpcMessage,\n http_request_id: Option,\n },\n EstablishRequestWiseChannel {\n responder: oneshot::Sender>,\n },\n CloseRequestWiseChannel {\n id: HttpRequestId,\n responder: oneshot::Sender>,\n },\n Resume {\n last_event_id: EventId,\n responder: oneshot::Sender>,\n },\n InitializeRequest {\n request: ClientJsonRpcMessage,\n responder: oneshot::Sender>,\n },\n Close,\n}\n\n#[derive(Debug, Clone)]\npub enum SessionQuitReason {\n ServiceTerminated,\n ClientTerminated,\n ExpectInitializeRequest,\n ExpectInitializeResponse,\n Cancelled,\n}\n\n#[derive(Debug, Clone)]\npub struct LocalSessionHandle {\n id: SessionId,\n // after all event_tx drop, inner task will be terminated\n event_tx: Sender,\n}\n\nimpl LocalSessionHandle {\n /// Get the session id\n pub fn id(&self) -> &SessionId {\n &self.id\n }\n\n /// Close the session\n pub async fn 
close(&self) -> Result<(), SessionError> {\n self.event_tx\n .send(SessionEvent::Close)\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n Ok(())\n }\n\n /// Send a message to the session\n pub async fn push_message(\n &self,\n message: ClientJsonRpcMessage,\n http_request_id: Option,\n ) -> Result<(), SessionError> {\n self.event_tx\n .send(SessionEvent::ClientMessage {\n message,\n http_request_id,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n Ok(())\n }\n\n /// establish a channel for a http-request, the corresponded message from server will be\n /// sent through this channel. The channel will be closed when the request is completed,\n /// or you can close it manually by calling [`LocalSessionHandle::close_request_wise_channel`].\n pub async fn establish_request_wise_channel(\n &self,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::EstablishRequestWiseChannel { responder: tx })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n\n /// close the http-request wise channel.\n pub async fn close_request_wise_channel(\n &self,\n request_id: HttpRequestId,\n ) -> Result<(), SessionError> {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::CloseRequestWiseChannel {\n id: request_id,\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n\n /// Establish a common channel for general purpose messages.\n pub async fn establish_common_channel(\n &self,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::Resume {\n last_event_id: EventId {\n http_request_id: None,\n index: 0,\n },\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| 
SessionError::SessionServiceTerminated)?\n }\n\n /// Resume streaming response by the last event id. This is suitable for both request wise and common channel.\n pub async fn resume(\n &self,\n last_event_id: EventId,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::Resume {\n last_event_id,\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n\n /// Send an initialize request to the session. And wait for the initialized response.\n ///\n /// This is used to establish a session with the server.\n pub async fn initialize(\n &self,\n request: ClientJsonRpcMessage,\n ) -> Result {\n let (tx, rx) = tokio::sync::oneshot::channel();\n self.event_tx\n .send(SessionEvent::InitializeRequest {\n request,\n responder: tx,\n })\n .await\n .map_err(|_| SessionError::SessionServiceTerminated)?;\n rx.await\n .map_err(|_| SessionError::SessionServiceTerminated)?\n }\n}\n\npub type SessionTransport = WorkerTransport;\n\nimpl Worker for LocalSessionWorker {\n type Error = SessionError;\n type Role = RoleServer;\n fn err_closed() -> Self::Error {\n SessionError::TransportClosed\n }\n fn err_join(e: tokio::task::JoinError) -> Self::Error {\n SessionError::TokioJoinError(e)\n }\n fn config(&self) -> crate::transport::worker::WorkerConfig {\n crate::transport::worker::WorkerConfig {\n name: Some(format!(\"streamable-http-session-{}\", self.id)),\n channel_buffer_capacity: self.session_config.channel_capacity,\n }\n }\n #[instrument(name = \"streamable_http_session\", skip_all, fields(id = self.id.as_ref()))]\n async fn run(mut self, mut context: WorkerContext) -> Result<(), WorkerQuitReason> {\n enum InnerEvent {\n FromHttpService(SessionEvent),\n FromHandler(WorkerSendRequest),\n }\n // waiting for initialize request\n let evt = self.event_rx.recv().await.ok_or_else(|| {\n WorkerQuitReason::fatal(\"transport terminated\", 
\"get initialize request\")\n })?;\n let SessionEvent::InitializeRequest { request, responder } = evt else {\n return Err(WorkerQuitReason::fatal(\n \"unexpected message\",\n \"get initialize request\",\n ));\n };\n context.send_to_handler(request).await?;\n let send_initialize_response = context.recv_from_handler().await?;\n responder\n .send(Ok(send_initialize_response.message))\n .map_err(|_| {\n WorkerQuitReason::fatal(\n \"failed to send initialize response to http service\",\n \"send initialize response\",\n )\n })?;\n send_initialize_response\n .responder\n .send(Ok(()))\n .map_err(|_| WorkerQuitReason::HandlerTerminated)?;\n let ct = context.cancellation_token.clone();\n let keep_alive = self.session_config.keep_alive.unwrap_or(Duration::MAX);\n loop {\n let keep_alive_timeout = tokio::time::sleep(keep_alive);\n let event = tokio::select! {\n event = self.event_rx.recv() => {\n if let Some(event) = event {\n InnerEvent::FromHttpService(event)\n } else {\n return Err(WorkerQuitReason::fatal(\"session dropped\", \"waiting next session event\"))\n }\n },\n from_handler = context.recv_from_handler() => {\n InnerEvent::FromHandler(from_handler?)\n }\n _ = ct.cancelled() => {\n return Err(WorkerQuitReason::Cancelled)\n }\n _ = keep_alive_timeout => {\n return Err(WorkerQuitReason::fatal(\"keep live timeout\", \"poll next session event\"))\n }\n };\n match event {\n InnerEvent::FromHandler(WorkerSendRequest { message, responder }) => {\n // catch response\n let to_unregister = match &message {\n crate::model::JsonRpcMessage::Response(json_rpc_response) => {\n let request_id = json_rpc_response.id.clone();\n Some(ResourceKey::McpRequestId(request_id))\n }\n crate::model::JsonRpcMessage::Error(json_rpc_error) => {\n let request_id = json_rpc_error.id.clone();\n Some(ResourceKey::McpRequestId(request_id))\n }\n _ => {\n None\n // no need to unregister resource\n }\n };\n let handle_result = self.handle_server_message(message).await;\n let _ = 
responder.send(handle_result).inspect_err(|error| {\n tracing::warn!(?error, \"failed to send message to http service handler\");\n });\n if let Some(to_unregister) = to_unregister {\n self.unregister_resource(&to_unregister);\n }\n }\n InnerEvent::FromHttpService(SessionEvent::ClientMessage {\n message: json_rpc_message,\n http_request_id,\n }) => {\n match &json_rpc_message {\n crate::model::JsonRpcMessage::Request(request) => {\n if let Some(http_request_id) = http_request_id {\n self.register_request(request, http_request_id)\n }\n }\n crate::model::JsonRpcMessage::Notification(notification) => {\n self.catch_cancellation_notification(notification)\n }\n crate::model::JsonRpcMessage::BatchRequest(items) => {\n for r in items {\n match r {\n crate::model::JsonRpcBatchRequestItem::Request(request) => {\n if let Some(http_request_id) = http_request_id {\n self.register_request(request, http_request_id)\n }\n }\n crate::model::JsonRpcBatchRequestItem::Notification(\n notification,\n ) => self.catch_cancellation_notification(notification),\n }\n }\n }\n _ => {}\n }\n context.send_to_handler(json_rpc_message).await?;\n }\n InnerEvent::FromHttpService(SessionEvent::EstablishRequestWiseChannel {\n responder,\n }) => {\n let handle_result = self.establish_request_wise_channel().await;\n let _ = responder.send(handle_result);\n }\n InnerEvent::FromHttpService(SessionEvent::CloseRequestWiseChannel {\n id,\n responder,\n }) => {\n let _handle_result = self.tx_router.remove(&id);\n let _ = responder.send(Ok(()));\n }\n InnerEvent::FromHttpService(SessionEvent::Resume {\n last_event_id,\n responder,\n }) => {\n let handle_result = self.resume(last_event_id).await;\n let _ = responder.send(handle_result);\n }\n InnerEvent::FromHttpService(SessionEvent::Close) => {\n return Err(WorkerQuitReason::TransportClosed);\n }\n _ => {\n // ignore\n }\n }\n }\n }\n}\n\n#[derive(Debug, Clone)]\npub struct SessionConfig {\n /// the capacity of the channel for the session. 
Default is 16.\n pub channel_capacity: usize,\n /// if set, the session will be closed after this duration of inactivity.\n pub keep_alive: Option,\n}\n\nimpl SessionConfig {\n pub const DEFAULT_CHANNEL_CAPACITY: usize = 16;\n}\n\nimpl Default for SessionConfig {\n fn default() -> Self {\n Self {\n channel_capacity: Self::DEFAULT_CHANNEL_CAPACITY,\n keep_alive: None,\n }\n }\n}\n\n/// Create a new session with the given id and configuration.\n///\n/// This function will return a pair of [`LocalSessionHandle`] and [`LocalSessionWorker`].\n///\n/// You can run the [`LocalSessionWorker`] as a transport for mcp server. And use the [`LocalSessionHandle`] operate the session.\npub fn create_local_session(\n id: impl Into,\n config: SessionConfig,\n) -> (LocalSessionHandle, LocalSessionWorker) {\n let id = id.into();\n let (event_tx, event_rx) = tokio::sync::mpsc::channel(config.channel_capacity);\n let (common_tx, _) = tokio::sync::mpsc::channel(config.channel_capacity);\n let common = CachedTx::new_common(common_tx);\n tracing::info!(session_id = ?id, \"create new session\");\n let handle = LocalSessionHandle {\n event_tx,\n id: id.clone(),\n };\n let session_worker = LocalSessionWorker {\n next_http_request_id: 0,\n id,\n tx_router: HashMap::new(),\n resource_router: HashMap::new(),\n common,\n event_rx,\n session_config: config.clone(),\n };\n (handle, session_worker)\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/auth/streamable_http_client.rs", "use crate::transport::{\n auth::AuthClient,\n streamable_http_client::{StreamableHttpClient, StreamableHttpError},\n};\nimpl StreamableHttpClient for AuthClient\nwhere\n C: StreamableHttpClient + Send + Sync,\n{\n type Error = StreamableHttpError;\n\n async fn delete_session(\n &self,\n uri: std::sync::Arc,\n session_id: std::sync::Arc,\n mut auth_token: Option,\n ) -> Result<(), crate::transport::streamable_http_client::StreamableHttpError>\n {\n if auth_token.is_none() {\n auth_token = 
Some(self.get_access_token().await?);\n }\n self.http_client\n .delete_session(uri, session_id, auth_token)\n .await\n .map_err(StreamableHttpError::Client)\n }\n\n async fn get_stream(\n &self,\n uri: std::sync::Arc,\n session_id: std::sync::Arc,\n last_event_id: Option,\n mut auth_token: Option,\n ) -> Result<\n futures::stream::BoxStream<'static, Result>,\n crate::transport::streamable_http_client::StreamableHttpError,\n > {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .get_stream(uri, session_id, last_event_id, auth_token)\n .await\n .map_err(StreamableHttpError::Client)\n }\n\n async fn post_message(\n &self,\n uri: std::sync::Arc,\n message: crate::model::ClientJsonRpcMessage,\n session_id: Option>,\n mut auth_token: Option,\n ) -> Result<\n crate::transport::streamable_http_client::StreamableHttpPostResponse,\n StreamableHttpError,\n > {\n if auth_token.is_none() {\n auth_token = Some(self.get_access_token().await?);\n }\n self.http_client\n .post_message(uri, message, session_id, auth_token)\n .await\n .map_err(StreamableHttpError::Client)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/client_side_sse.rs", "use std::{\n pin::Pin,\n sync::Arc,\n task::{Poll, ready},\n time::Duration,\n};\n\nuse futures::{Stream, stream::BoxStream};\nuse sse_stream::{Error as SseError, Sse};\n\nuse crate::model::ServerJsonRpcMessage;\n\npub type BoxedSseResponse = BoxStream<'static, Result>;\n\npub trait SseRetryPolicy: std::fmt::Debug + Send + Sync {\n fn retry(&self, current_times: usize) -> Option;\n}\n\n#[derive(Debug, Clone)]\npub struct FixedInterval {\n pub max_times: Option,\n pub duration: Duration,\n}\n\nimpl SseRetryPolicy for FixedInterval {\n fn retry(&self, current_times: usize) -> Option {\n if let Some(max_times) = self.max_times {\n if current_times >= max_times {\n return None;\n }\n }\n Some(self.duration)\n }\n}\n\nimpl FixedInterval {\n pub const DEFAULT_MIN_DURATION: Duration = 
Duration::from_millis(1000);\n}\n\nimpl Default for FixedInterval {\n fn default() -> Self {\n Self {\n max_times: None,\n duration: Self::DEFAULT_MIN_DURATION,\n }\n }\n}\n\n#[derive(Debug, Clone)]\npub struct ExponentialBackoff {\n pub max_times: Option,\n pub base_duration: Duration,\n}\n\nimpl ExponentialBackoff {\n pub const DEFAULT_DURATION: Duration = Duration::from_millis(1000);\n}\n\nimpl Default for ExponentialBackoff {\n fn default() -> Self {\n Self {\n max_times: None,\n base_duration: Self::DEFAULT_DURATION,\n }\n }\n}\n\nimpl SseRetryPolicy for ExponentialBackoff {\n fn retry(&self, current_times: usize) -> Option {\n if let Some(max_times) = self.max_times {\n if current_times >= max_times {\n return None;\n }\n }\n Some(self.base_duration * (2u32.pow(current_times as u32)))\n }\n}\n\n#[derive(Debug, Clone, Copy, Default)]\npub struct NeverRetry;\n\nimpl SseRetryPolicy for NeverRetry {\n fn retry(&self, _current_times: usize) -> Option {\n None\n }\n}\n\n#[derive(Debug, Default)]\npub struct NeverReconnect {\n error: Option,\n}\n\nimpl SseStreamReconnect for NeverReconnect {\n type Error = E;\n type Future = futures::future::Ready>;\n fn retry_connection(&mut self, _last_event_id: Option<&str>) -> Self::Future {\n futures::future::ready(Err(self.error.take().expect(\"should not be called again\")))\n }\n}\n\npub(crate) trait SseStreamReconnect {\n type Error: std::error::Error;\n type Future: Future> + Send;\n fn retry_connection(&mut self, last_event_id: Option<&str>) -> Self::Future;\n}\n\npin_project_lite::pin_project! 
{\n pub(crate) struct SseAutoReconnectStream\n where R: SseStreamReconnect\n {\n retry_policy: Arc,\n last_event_id: Option,\n server_retry_interval: Option,\n connector: R,\n #[pin]\n state: SseAutoReconnectStreamState,\n }\n}\n\nimpl SseAutoReconnectStream {\n pub fn new(\n stream: BoxedSseResponse,\n connector: R,\n retry_policy: Arc,\n ) -> Self {\n Self {\n retry_policy,\n last_event_id: None,\n server_retry_interval: None,\n connector,\n state: SseAutoReconnectStreamState::Connected { stream },\n }\n }\n}\n\nimpl SseAutoReconnectStream> {\n #[allow(dead_code)]\n pub(crate) fn never_reconnect(stream: BoxedSseResponse, error_when_reconnect: E) -> Self {\n Self {\n retry_policy: Arc::new(NeverRetry),\n last_event_id: None,\n server_retry_interval: None,\n connector: NeverReconnect {\n error: Some(error_when_reconnect),\n },\n state: SseAutoReconnectStreamState::Connected { stream },\n }\n }\n}\n\npin_project_lite::pin_project! {\n #[project = SseAutoReconnectStreamStateProj]\n pub enum SseAutoReconnectStreamState {\n Connected {\n #[pin]\n stream: BoxedSseResponse,\n },\n Retrying {\n retry_times: usize,\n #[pin]\n retrying: F,\n },\n WaitingNextRetry {\n #[pin]\n sleep: tokio::time::Sleep,\n retry_times: usize,\n },\n Terminated,\n }\n}\n\nimpl Stream for SseAutoReconnectStream\nwhere\n R: SseStreamReconnect,\n{\n type Item = Result;\n fn poll_next(\n mut self: Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> Poll> {\n let mut this = self.as_mut().project();\n // let this_state = this.state.as_mut().project()\n let state = this.state.as_mut().project();\n let next_state = match state {\n SseAutoReconnectStreamStateProj::Connected { stream } => {\n match ready!(stream.poll_next(cx)) {\n Some(Ok(sse)) => {\n if let Some(new_server_retry) = sse.retry {\n *this.server_retry_interval =\n Some(Duration::from_millis(new_server_retry));\n }\n if let Some(event_id) = sse.id {\n *this.last_event_id = Some(event_id);\n }\n if let Some(data) = sse.data {\n match 
serde_json::from_str::(&data) {\n Err(e) => {\n // not sure should this be a hard error\n tracing::warn!(\"failed to deserialize server message: {e}\");\n return self.poll_next(cx);\n }\n Ok(message) => {\n return Poll::Ready(Some(Ok(message)));\n }\n };\n } else {\n return self.poll_next(cx);\n }\n }\n Some(Err(e)) => {\n tracing::warn!(\"sse stream error: {e}\");\n let retrying = this\n .connector\n .retry_connection(this.last_event_id.as_deref());\n SseAutoReconnectStreamState::Retrying {\n retry_times: 0,\n retrying,\n }\n }\n None => {\n tracing::debug!(\"sse stream terminated\");\n return Poll::Ready(None);\n }\n }\n }\n SseAutoReconnectStreamStateProj::Retrying {\n retry_times,\n retrying,\n } => {\n let retry_result = ready!(retrying.poll(cx));\n match retry_result {\n Ok(new_stream) => SseAutoReconnectStreamState::Connected { stream: new_stream },\n Err(e) => {\n tracing::debug!(\"retry sse stream error: {e}\");\n *retry_times += 1;\n if let Some(interval) = this.retry_policy.retry(*retry_times) {\n let interval = this\n .server_retry_interval\n .map(|server_retry_interval| server_retry_interval.max(interval))\n .unwrap_or(interval);\n let sleep = tokio::time::sleep(interval);\n SseAutoReconnectStreamState::WaitingNextRetry {\n sleep,\n retry_times: *retry_times,\n }\n } else {\n tracing::error!(\"sse stream error: {e}, max retry times reached\");\n this.state.set(SseAutoReconnectStreamState::Terminated);\n return Poll::Ready(Some(Err(e)));\n }\n }\n }\n }\n SseAutoReconnectStreamStateProj::WaitingNextRetry { sleep, retry_times } => {\n ready!(sleep.poll(cx));\n let retrying = this\n .connector\n .retry_connection(this.last_event_id.as_deref());\n let retry_times = *retry_times;\n SseAutoReconnectStreamState::Retrying {\n retry_times,\n retrying,\n }\n }\n SseAutoReconnectStreamStateProj::Terminated => {\n return Poll::Ready(None);\n }\n };\n // update the state\n this.state.set(next_state);\n self.poll_next(cx)\n }\n}\n"], 
["/rust-sdk/crates/rmcp/src/service.rs", "use futures::{FutureExt, future::BoxFuture};\nuse thiserror::Error;\n\nuse crate::{\n error::ErrorData as McpError,\n model::{\n CancelledNotification, CancelledNotificationParam, Extensions, GetExtensions, GetMeta,\n JsonRpcBatchRequestItem, JsonRpcBatchResponseItem, JsonRpcError, JsonRpcMessage,\n JsonRpcNotification, JsonRpcRequest, JsonRpcResponse, Meta, NumberOrString, ProgressToken,\n RequestId, ServerJsonRpcMessage,\n },\n transport::{DynamicTransportError, IntoTransport, Transport},\n};\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\nmod client;\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub use client::*;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\nmod server;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub use server::*;\n#[cfg(feature = \"tower\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"tower\")))]\nmod tower;\nuse tokio_util::sync::{CancellationToken, DropGuard};\n#[cfg(feature = \"tower\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"tower\")))]\npub use tower::*;\nuse tracing::instrument;\n#[derive(Error, Debug)]\n#[non_exhaustive]\npub enum ServiceError {\n #[error(\"Mcp error: {0}\")]\n McpError(McpError),\n #[error(\"Transport send error: {0}\")]\n TransportSend(DynamicTransportError),\n #[error(\"Transport closed\")]\n TransportClosed,\n #[error(\"Unexpected response type\")]\n UnexpectedResponse,\n #[error(\"task cancelled for reason {}\", reason.as_deref().unwrap_or(\"\"))]\n Cancelled { reason: Option },\n #[error(\"request timeout after {}\", chrono::Duration::from_std(*timeout).unwrap_or_default())]\n Timeout { timeout: Duration },\n}\n\ntrait TransferObject:\n std::fmt::Debug + Clone + serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static\n{\n}\n\nimpl TransferObject for T where\n T: std::fmt::Debug\n + serde::Serialize\n + 
serde::de::DeserializeOwned\n + Send\n + Sync\n + 'static\n + Clone\n{\n}\n\n#[allow(private_bounds, reason = \"there's no the third implementation\")]\npub trait ServiceRole: std::fmt::Debug + Send + Sync + 'static + Copy + Clone {\n type Req: TransferObject + GetMeta + GetExtensions;\n type Resp: TransferObject;\n type Not: TryInto\n + From\n + TransferObject;\n type PeerReq: TransferObject + GetMeta + GetExtensions;\n type PeerResp: TransferObject;\n type PeerNot: TryInto\n + From\n + TransferObject\n + GetMeta\n + GetExtensions;\n type InitializeError;\n const IS_CLIENT: bool;\n type Info: TransferObject;\n type PeerInfo: TransferObject;\n}\n\npub type TxJsonRpcMessage =\n JsonRpcMessage<::Req, ::Resp, ::Not>;\npub type RxJsonRpcMessage = JsonRpcMessage<\n ::PeerReq,\n ::PeerResp,\n ::PeerNot,\n>;\n\npub trait Service: Send + Sync + 'static {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> impl Future> + Send + '_;\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> impl Future> + Send + '_;\n fn get_info(&self) -> R::Info;\n}\n\npub trait ServiceExt: Service + Sized {\n /// Convert this service to a dynamic boxed service\n ///\n /// This could be very helpful when you want to store the services in a collection\n fn into_dyn(self) -> Box> {\n Box::new(self)\n }\n fn serve(\n self,\n transport: T,\n ) -> impl Future, R::InitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized,\n {\n Self::serve_with_ct(self, transport, Default::default())\n }\n fn serve_with_ct(\n self,\n transport: T,\n ct: CancellationToken,\n ) -> impl Future, R::InitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized;\n}\n\nimpl Service for Box> {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n 
DynService::handle_request(self.as_ref(), request, context)\n }\n\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> impl Future> + Send + '_ {\n DynService::handle_notification(self.as_ref(), notification, context)\n }\n\n fn get_info(&self) -> R::Info {\n DynService::get_info(self.as_ref())\n }\n}\n\npub trait DynService: Send + Sync {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> BoxFuture>;\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> BoxFuture>;\n fn get_info(&self) -> R::Info;\n}\n\nimpl> DynService for S {\n fn handle_request(\n &self,\n request: R::PeerReq,\n context: RequestContext,\n ) -> BoxFuture> {\n Box::pin(self.handle_request(request, context))\n }\n fn handle_notification(\n &self,\n notification: R::PeerNot,\n context: NotificationContext,\n ) -> BoxFuture> {\n Box::pin(self.handle_notification(notification, context))\n }\n fn get_info(&self) -> R::Info {\n self.get_info()\n }\n}\n\nuse std::{\n collections::{HashMap, VecDeque},\n ops::Deref,\n sync::{Arc, atomic::AtomicU32},\n time::Duration,\n};\n\nuse tokio::sync::mpsc;\n\npub trait RequestIdProvider: Send + Sync + 'static {\n fn next_request_id(&self) -> RequestId;\n}\n\npub trait ProgressTokenProvider: Send + Sync + 'static {\n fn next_progress_token(&self) -> ProgressToken;\n}\n\npub type AtomicU32RequestIdProvider = AtomicU32Provider;\npub type AtomicU32ProgressTokenProvider = AtomicU32Provider;\n\n#[derive(Debug, Default)]\npub struct AtomicU32Provider {\n id: AtomicU32,\n}\n\nimpl RequestIdProvider for AtomicU32Provider {\n fn next_request_id(&self) -> RequestId {\n RequestId::Number(self.id.fetch_add(1, std::sync::atomic::Ordering::SeqCst))\n }\n}\n\nimpl ProgressTokenProvider for AtomicU32Provider {\n fn next_progress_token(&self) -> ProgressToken {\n ProgressToken(NumberOrString::Number(\n self.id.fetch_add(1, 
std::sync::atomic::Ordering::SeqCst),\n ))\n }\n}\n\ntype Responder = tokio::sync::oneshot::Sender;\n\n/// A handle to a remote request\n///\n/// You can cancel it by call [`RequestHandle::cancel`] with a reason,\n///\n/// or wait for response by call [`RequestHandle::await_response`]\n#[derive(Debug)]\npub struct RequestHandle {\n pub rx: tokio::sync::oneshot::Receiver>,\n pub options: PeerRequestOptions,\n pub peer: Peer,\n pub id: RequestId,\n pub progress_token: ProgressToken,\n}\n\nimpl RequestHandle {\n pub const REQUEST_TIMEOUT_REASON: &str = \"request timeout\";\n pub async fn await_response(self) -> Result {\n if let Some(timeout) = self.options.timeout {\n let timeout_result = tokio::time::timeout(timeout, async move {\n self.rx.await.map_err(|_e| ServiceError::TransportClosed)?\n })\n .await;\n match timeout_result {\n Ok(response) => response,\n Err(_) => {\n let error = Err(ServiceError::Timeout { timeout });\n // cancel this request\n let notification = CancelledNotification {\n params: CancelledNotificationParam {\n request_id: self.id,\n reason: Some(Self::REQUEST_TIMEOUT_REASON.to_owned()),\n },\n method: crate::model::CancelledNotificationMethod,\n extensions: Default::default(),\n };\n let _ = self.peer.send_notification(notification.into()).await;\n error\n }\n }\n } else {\n self.rx.await.map_err(|_e| ServiceError::TransportClosed)?\n }\n }\n\n /// Cancel this request\n pub async fn cancel(self, reason: Option) -> Result<(), ServiceError> {\n let notification = CancelledNotification {\n params: CancelledNotificationParam {\n request_id: self.id,\n reason,\n },\n method: crate::model::CancelledNotificationMethod,\n extensions: Default::default(),\n };\n self.peer.send_notification(notification.into()).await?;\n Ok(())\n }\n}\n\n#[derive(Debug)]\npub(crate) enum PeerSinkMessage {\n Request {\n request: R::Req,\n id: RequestId,\n responder: Responder>,\n },\n Notification {\n notification: R::Not,\n responder: Responder>,\n },\n}\n\n/// An 
interface to fetch the remote client or server\n///\n/// For general purpose, call [`Peer::send_request`] or [`Peer::send_notification`] to send message to remote peer.\n///\n/// To create a cancellable request, call [`Peer::send_request_with_option`].\n#[derive(Clone)]\npub struct Peer {\n tx: mpsc::Sender>,\n request_id_provider: Arc,\n progress_token_provider: Arc,\n info: Arc>,\n}\n\nimpl std::fmt::Debug for Peer {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"PeerSink\")\n .field(\"tx\", &self.tx)\n .field(\"is_client\", &R::IS_CLIENT)\n .finish()\n }\n}\n\ntype ProxyOutbound = mpsc::Receiver>;\n\n#[derive(Debug, Default)]\npub struct PeerRequestOptions {\n pub timeout: Option,\n pub meta: Option,\n}\n\nimpl PeerRequestOptions {\n pub fn no_options() -> Self {\n Self::default()\n }\n}\n\nimpl Peer {\n const CLIENT_CHANNEL_BUFFER_SIZE: usize = 1024;\n pub(crate) fn new(\n request_id_provider: Arc,\n peer_info: Option,\n ) -> (Peer, ProxyOutbound) {\n let (tx, rx) = mpsc::channel(Self::CLIENT_CHANNEL_BUFFER_SIZE);\n (\n Self {\n tx,\n request_id_provider,\n progress_token_provider: Arc::new(AtomicU32ProgressTokenProvider::default()),\n info: Arc::new(tokio::sync::OnceCell::new_with(peer_info)),\n },\n rx,\n )\n }\n pub async fn send_notification(&self, notification: R::Not) -> Result<(), ServiceError> {\n let (responder, receiver) = tokio::sync::oneshot::channel();\n self.tx\n .send(PeerSinkMessage::Notification {\n notification,\n responder,\n })\n .await\n .map_err(|_m| ServiceError::TransportClosed)?;\n receiver.await.map_err(|_e| ServiceError::TransportClosed)?\n }\n pub async fn send_request(&self, request: R::Req) -> Result {\n self.send_request_with_option(request, PeerRequestOptions::no_options())\n .await?\n .await_response()\n .await\n }\n\n pub async fn send_cancellable_request(\n &self,\n request: R::Req,\n options: PeerRequestOptions,\n ) -> Result, ServiceError> {\n self.send_request_with_option(request, 
options).await\n }\n\n pub async fn send_request_with_option(\n &self,\n mut request: R::Req,\n options: PeerRequestOptions,\n ) -> Result, ServiceError> {\n let id = self.request_id_provider.next_request_id();\n let progress_token = self.progress_token_provider.next_progress_token();\n request\n .get_meta_mut()\n .set_progress_token(progress_token.clone());\n if let Some(meta) = options.meta.clone() {\n request.get_meta_mut().extend(meta);\n }\n let (responder, receiver) = tokio::sync::oneshot::channel();\n self.tx\n .send(PeerSinkMessage::Request {\n request,\n id: id.clone(),\n responder,\n })\n .await\n .map_err(|_m| ServiceError::TransportClosed)?;\n Ok(RequestHandle {\n id,\n rx: receiver,\n progress_token,\n options,\n peer: self.clone(),\n })\n }\n pub fn peer_info(&self) -> Option<&R::PeerInfo> {\n self.info.get()\n }\n\n pub fn set_peer_info(&self, info: R::PeerInfo) {\n if self.info.initialized() {\n tracing::warn!(\"trying to set peer info, which is already initialized\");\n } else {\n let _ = self.info.set(info);\n }\n }\n\n pub fn is_transport_closed(&self) -> bool {\n self.tx.is_closed()\n }\n}\n\n#[derive(Debug)]\npub struct RunningService> {\n service: Arc,\n peer: Peer,\n handle: tokio::task::JoinHandle,\n cancellation_token: CancellationToken,\n dg: DropGuard,\n}\nimpl> Deref for RunningService {\n type Target = Peer;\n\n fn deref(&self) -> &Self::Target {\n &self.peer\n }\n}\n\nimpl> RunningService {\n #[inline]\n pub fn peer(&self) -> &Peer {\n &self.peer\n }\n #[inline]\n pub fn service(&self) -> &S {\n self.service.as_ref()\n }\n #[inline]\n pub fn cancellation_token(&self) -> RunningServiceCancellationToken {\n RunningServiceCancellationToken(self.cancellation_token.clone())\n }\n #[inline]\n pub async fn waiting(self) -> Result {\n self.handle.await\n }\n pub async fn cancel(self) -> Result {\n let RunningService { dg, handle, .. 
} = self;\n dg.disarm().cancel();\n handle.await\n }\n}\n\n// use a wrapper type so we can tweak the implementation if needed\npub struct RunningServiceCancellationToken(CancellationToken);\n\nimpl RunningServiceCancellationToken {\n pub fn cancel(self) {\n self.0.cancel();\n }\n}\n\n#[derive(Debug)]\npub enum QuitReason {\n Cancelled,\n Closed,\n JoinError(tokio::task::JoinError),\n}\n\n/// Request execution context\n#[derive(Debug, Clone)]\npub struct RequestContext {\n /// this token will be cancelled when the [`CancelledNotification`] is received.\n pub ct: CancellationToken,\n pub id: RequestId,\n pub meta: Meta,\n pub extensions: Extensions,\n /// An interface to fetch the remote client or server\n pub peer: Peer,\n}\n\n/// Request execution context\n#[derive(Debug, Clone)]\npub struct NotificationContext {\n pub meta: Meta,\n pub extensions: Extensions,\n /// An interface to fetch the remote client or server\n pub peer: Peer,\n}\n\n/// Use this function to skip initialization process\npub fn serve_directly(\n service: S,\n transport: T,\n peer_info: Option,\n) -> RunningService\nwhere\n R: ServiceRole,\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n serve_directly_with_ct(service, transport, peer_info, Default::default())\n}\n\n/// Use this function to skip initialization process\npub fn serve_directly_with_ct(\n service: S,\n transport: T,\n peer_info: Option,\n ct: CancellationToken,\n) -> RunningService\nwhere\n R: ServiceRole,\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n let (peer, peer_rx) = Peer::new(Arc::new(AtomicU32RequestIdProvider::default()), peer_info);\n serve_inner(service, transport.into_transport(), peer, peer_rx, ct)\n}\n\n#[instrument(skip_all)]\nfn serve_inner(\n service: S,\n transport: T,\n peer: Peer,\n mut peer_rx: tokio::sync::mpsc::Receiver>,\n ct: CancellationToken,\n) -> RunningService\nwhere\n R: ServiceRole,\n S: Service,\n T: Transport + 
'static,\n{\n const SINK_PROXY_BUFFER_SIZE: usize = 64;\n let (sink_proxy_tx, mut sink_proxy_rx) =\n tokio::sync::mpsc::channel::>(SINK_PROXY_BUFFER_SIZE);\n let peer_info = peer.peer_info();\n if R::IS_CLIENT {\n tracing::info!(?peer_info, \"Service initialized as client\");\n } else {\n tracing::info!(?peer_info, \"Service initialized as server\");\n }\n\n let mut local_responder_pool =\n HashMap::>>::new();\n let mut local_ct_pool = HashMap::::new();\n let shared_service = Arc::new(service);\n // for return\n let service = shared_service.clone();\n\n // let message_sink = tokio::sync::\n // let mut stream = std::pin::pin!(stream);\n let serve_loop_ct = ct.child_token();\n let peer_return: Peer = peer.clone();\n let handle = tokio::spawn(async move {\n let mut transport = transport.into_transport();\n let mut batch_messages = VecDeque::>::new();\n let mut send_task_set = tokio::task::JoinSet::::new();\n #[derive(Debug)]\n enum SendTaskResult {\n Request {\n id: RequestId,\n result: Result<(), DynamicTransportError>,\n },\n Notification {\n responder: Responder>,\n cancellation_param: Option,\n result: Result<(), DynamicTransportError>,\n },\n }\n #[derive(Debug)]\n enum Event {\n ProxyMessage(PeerSinkMessage),\n PeerMessage(RxJsonRpcMessage),\n ToSink(TxJsonRpcMessage),\n SendTaskResult(SendTaskResult),\n }\n\n let quit_reason = loop {\n let evt = if let Some(m) = batch_messages.pop_front() {\n Event::PeerMessage(m)\n } else {\n tokio::select! 
{\n m = sink_proxy_rx.recv(), if !sink_proxy_rx.is_closed() => {\n if let Some(m) = m {\n Event::ToSink(m)\n } else {\n continue\n }\n }\n m = transport.receive() => {\n if let Some(m) = m {\n Event::PeerMessage(m)\n } else {\n // input stream closed\n tracing::info!(\"input stream terminated\");\n break QuitReason::Closed\n }\n }\n m = peer_rx.recv(), if !peer_rx.is_closed() => {\n if let Some(m) = m {\n Event::ProxyMessage(m)\n } else {\n continue\n }\n }\n m = send_task_set.join_next(), if !send_task_set.is_empty() => {\n let Some(result) = m else {\n continue\n };\n match result {\n Err(e) => {\n // join error, which is serious, we should quit.\n tracing::error!(%e, \"send request task encounter a tokio join error\");\n break QuitReason::JoinError(e)\n }\n Ok(result) => {\n Event::SendTaskResult(result)\n }\n }\n }\n _ = serve_loop_ct.cancelled() => {\n tracing::info!(\"task cancelled\");\n break QuitReason::Cancelled\n }\n }\n };\n\n tracing::trace!(?evt, \"new event\");\n match evt {\n Event::SendTaskResult(SendTaskResult::Request { id, result }) => {\n if let Err(e) = result {\n if let Some(responder) = local_responder_pool.remove(&id) {\n let _ = responder.send(Err(ServiceError::TransportSend(e)));\n }\n }\n }\n Event::SendTaskResult(SendTaskResult::Notification {\n responder,\n result,\n cancellation_param,\n }) => {\n let response = if let Err(e) = result {\n Err(ServiceError::TransportSend(e))\n } else {\n Ok(())\n };\n let _ = responder.send(response);\n if let Some(param) = cancellation_param {\n if let Some(responder) = local_responder_pool.remove(¶m.request_id) {\n tracing::info!(id = %param.request_id, reason = param.reason, \"cancelled\");\n let _response_result = responder.send(Err(ServiceError::Cancelled {\n reason: param.reason.clone(),\n }));\n }\n }\n }\n // response and error\n Event::ToSink(m) => {\n if let Some(id) = match &m {\n JsonRpcMessage::Response(response) => Some(&response.id),\n JsonRpcMessage::Error(error) => Some(&error.id),\n _ 
=> None,\n } {\n if let Some(ct) = local_ct_pool.remove(id) {\n ct.cancel();\n }\n let send = transport.send(m);\n tokio::spawn(async move {\n let send_result = send.await;\n if let Err(error) = send_result {\n tracing::error!(%error, \"fail to response message\");\n }\n });\n }\n }\n Event::ProxyMessage(PeerSinkMessage::Request {\n request,\n id,\n responder,\n }) => {\n local_responder_pool.insert(id.clone(), responder);\n let send = transport.send(JsonRpcMessage::request(request, id.clone()));\n {\n let id = id.clone();\n send_task_set.spawn(send.map(move |r| SendTaskResult::Request {\n id,\n result: r.map_err(DynamicTransportError::new::),\n }));\n }\n }\n Event::ProxyMessage(PeerSinkMessage::Notification {\n notification,\n responder,\n }) => {\n // catch cancellation notification\n let mut cancellation_param = None;\n let notification = match notification.try_into() {\n Ok::(cancelled) => {\n cancellation_param.replace(cancelled.params.clone());\n cancelled.into()\n }\n Err(notification) => notification,\n };\n let send = transport.send(JsonRpcMessage::notification(notification));\n send_task_set.spawn(send.map(move |result| SendTaskResult::Notification {\n responder,\n cancellation_param,\n result: result.map_err(DynamicTransportError::new::),\n }));\n }\n Event::PeerMessage(JsonRpcMessage::Request(JsonRpcRequest {\n id,\n mut request,\n ..\n })) => {\n tracing::debug!(%id, ?request, \"received request\");\n {\n let service = shared_service.clone();\n let sink = sink_proxy_tx.clone();\n let request_ct = serve_loop_ct.child_token();\n let context_ct = request_ct.child_token();\n local_ct_pool.insert(id.clone(), request_ct);\n let mut extensions = Extensions::new();\n let mut meta = Meta::new();\n // avoid clone\n // swap meta firstly, otherwise progress token will be lost\n std::mem::swap(&mut meta, request.get_meta_mut());\n std::mem::swap(&mut extensions, request.extensions_mut());\n let context = RequestContext {\n ct: context_ct,\n id: id.clone(),\n peer: 
peer.clone(),\n meta,\n extensions,\n };\n tokio::spawn(async move {\n let result = service.handle_request(request, context).await;\n let response = match result {\n Ok(result) => {\n tracing::debug!(%id, ?result, \"response message\");\n JsonRpcMessage::response(result, id)\n }\n Err(error) => {\n tracing::warn!(%id, ?error, \"response error\");\n JsonRpcMessage::error(error, id)\n }\n };\n let _send_result = sink.send(response).await;\n });\n }\n }\n Event::PeerMessage(JsonRpcMessage::Notification(JsonRpcNotification {\n notification,\n ..\n })) => {\n tracing::info!(?notification, \"received notification\");\n // catch cancelled notification\n let mut notification = match notification.try_into() {\n Ok::(cancelled) => {\n if let Some(ct) = local_ct_pool.remove(&cancelled.params.request_id) {\n tracing::info!(id = %cancelled.params.request_id, reason = cancelled.params.reason, \"cancelled\");\n ct.cancel();\n }\n cancelled.into()\n }\n Err(notification) => notification,\n };\n {\n let service = shared_service.clone();\n let mut extensions = Extensions::new();\n let mut meta = Meta::new();\n // avoid clone\n std::mem::swap(&mut extensions, notification.extensions_mut());\n std::mem::swap(&mut meta, notification.get_meta_mut());\n let context = NotificationContext {\n peer: peer.clone(),\n meta,\n extensions,\n };\n tokio::spawn(async move {\n let result = service.handle_notification(notification, context).await;\n if let Err(error) = result {\n tracing::warn!(%error, \"Error sending notification\");\n }\n });\n }\n }\n Event::PeerMessage(JsonRpcMessage::Response(JsonRpcResponse {\n result,\n id,\n ..\n })) => {\n if let Some(responder) = local_responder_pool.remove(&id) {\n let response_result = responder.send(Ok(result));\n if let Err(_error) = response_result {\n tracing::warn!(%id, \"Error sending response\");\n }\n }\n }\n Event::PeerMessage(JsonRpcMessage::Error(JsonRpcError { error, id, .. 
})) => {\n if let Some(responder) = local_responder_pool.remove(&id) {\n let _response_result = responder.send(Err(ServiceError::McpError(error)));\n if let Err(_error) = _response_result {\n tracing::warn!(%id, \"Error sending response\");\n }\n }\n }\n Event::PeerMessage(JsonRpcMessage::BatchRequest(batch)) => {\n batch_messages.extend(\n batch\n .into_iter()\n .map(JsonRpcBatchRequestItem::into_non_batch_message),\n );\n }\n Event::PeerMessage(JsonRpcMessage::BatchResponse(batch)) => {\n batch_messages.extend(\n batch\n .into_iter()\n .map(JsonRpcBatchResponseItem::into_non_batch_message),\n );\n }\n }\n };\n let sink_close_result = transport.close().await;\n if let Err(e) = sink_close_result {\n tracing::error!(%e, \"fail to close sink\");\n }\n tracing::info!(?quit_reason, \"serve finished\");\n quit_reason\n });\n RunningService {\n service,\n peer: peer_return,\n handle,\n cancellation_token: ct.clone(),\n dg: ct.drop_guard(),\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport.rs", "//! # Transport\n//! The transport type must implemented [`Transport`] trait, which allow it send message concurrently and receive message sequentially.\n//!\n//! ## Standard Transport Types\n//! There are 3 pairs of standard transport types:\n//!\n//! | transport | client | server |\n//! |:-: |:-: |:-: |\n//! | std IO | [`child_process::TokioChildProcess`] | [`io::stdio`] |\n//! | streamable http | [`streamable_http_client::StreamableHttpClientTransport`] | [`streamable_http_server::StreamableHttpService`] |\n//! | sse | [`sse_client::SseClientTransport`] | [`sse_server::SseServer`] |\n//!\n//!## Helper Transport Types\n//! Thers are several helper transport types that can help you to create transport quickly.\n//!\n//! ### [Worker Transport](`worker::WorkerTransport`)\n//! Which allows you to run a worker and process messages in another tokio task.\n//!\n//! ### [Async Read/Write Transport](`async_rw::AsyncRwTransport`)\n//! 
You need to enable `transport-async-rw` feature to use this transport.\n//!\n//! This transport is used to create a transport from a byte stream which implemented [`tokio::io::AsyncRead`] and [`tokio::io::AsyncWrite`].\n//!\n//! This could be very helpful when you want to create a transport from a byte stream, such as a file or a tcp connection.\n//!\n//! ### [Sink/Stream Transport](`sink_stream::SinkStreamTransport`)\n//! This transport is used to create a transport from a sink and a stream.\n//!\n//! This could be very helpful when you want to create a transport from a duplex object stream, such as a websocket connection.\n//!\n//! ## [IntoTransport](`IntoTransport`) trait\n//! [`IntoTransport`] is a helper trait that implicitly convert a type into a transport type.\n//!\n//! ### These types is automatically implemented [`IntoTransport`] trait\n//! 1. A type that already implement both [`futures::Sink`] and [`futures::Stream`] trait, or a tuple `(Tx, Rx)` where `Tx` is [`futures::Sink`] and `Rx` is [`futures::Stream`].\n//! 2. A type that implement both [`tokio::io::AsyncRead`] and [`tokio::io::AsyncWrite`] trait. or a tuple `(R, W)` where `R` is [`tokio::io::AsyncRead`] and `W` is [`tokio::io::AsyncWrite`].\n//! 3. A type that implement [Worker](`worker::Worker`) trait.\n//! 4. A type that implement [`Transport`] trait.\n//!\n//! ## Examples\n//!\n//! ```rust\n//! # use rmcp::{\n//! # ServiceExt, serve_client, serve_server,\n//! # };\n//!\n//! // create transport from tcp stream\n//! async fn client() -> Result<(), Box> {\n//! let stream = tokio::net::TcpSocket::new_v4()?\n//! .connect(\"127.0.0.1:8001\".parse()?)\n//! .await?;\n//! let client = ().serve(stream).await?;\n//! let tools = client.peer().list_tools(Default::default()).await?;\n//! println!(\"{:?}\", tools);\n//! Ok(())\n//! }\n//!\n//! // create transport from std io\n//! async fn io() -> Result<(), Box> {\n//! let client = ().serve((tokio::io::stdin(), tokio::io::stdout())).await?;\n//! 
let tools = client.peer().list_tools(Default::default()).await?;\n//! println!(\"{:?}\", tools);\n//! Ok(())\n//! }\n//! ```\n\nuse std::{borrow::Cow, sync::Arc};\n\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\npub mod sink_stream;\n\n#[cfg(feature = \"transport-async-rw\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-async-rw\")))]\npub mod async_rw;\n\n#[cfg(feature = \"transport-worker\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-worker\")))]\npub mod worker;\n#[cfg(feature = \"transport-worker\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-worker\")))]\npub use worker::WorkerTransport;\n\n#[cfg(feature = \"transport-child-process\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-child-process\")))]\npub mod child_process;\n#[cfg(feature = \"transport-child-process\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-child-process\")))]\npub use child_process::{ConfigureCommandExt, TokioChildProcess};\n\n#[cfg(feature = \"transport-io\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-io\")))]\npub mod io;\n#[cfg(feature = \"transport-io\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-io\")))]\npub use io::stdio;\n\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\npub mod sse_client;\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\npub use sse_client::SseClientTransport;\n\n#[cfg(feature = \"transport-sse-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-server\")))]\npub mod sse_server;\n#[cfg(feature = \"transport-sse-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-server\")))]\npub use sse_server::SseServer;\n\n#[cfg(feature = \"auth\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\npub mod auth;\n#[cfg(feature = \"auth\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\npub use auth::{AuthError, AuthorizationManager, 
AuthorizationSession, AuthorizedHttpClient};\n\n// #[cfg(feature = \"transport-ws\")]\n// #[cfg_attr(docsrs, doc(cfg(feature = \"transport-ws\")))]\n// pub mod ws;\n#[cfg(feature = \"transport-streamable-http-server-session\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server-session\")))]\npub mod streamable_http_server;\n#[cfg(feature = \"transport-streamable-http-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server\")))]\npub use streamable_http_server::tower::{StreamableHttpServerConfig, StreamableHttpService};\n\n#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\npub mod streamable_http_client;\n#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\npub use streamable_http_client::StreamableHttpClientTransport;\n\n/// Common use codes\npub mod common;\n\npub trait Transport: Send\nwhere\n R: ServiceRole,\n{\n type Error: std::error::Error + Send + Sync + 'static;\n fn name() -> Cow<'static, str> {\n std::any::type_name::().into()\n }\n /// Send a message to the transport\n ///\n /// Notice that the future returned by this function should be `Send` and `'static`.\n /// It's because the sending message could be executed concurrently.\n ///\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static;\n\n /// Receive a message from the transport, this operation is sequential.\n fn receive(&mut self) -> impl Future>> + Send;\n\n /// Close the transport\n fn close(&mut self) -> impl Future> + Send;\n}\n\npub trait IntoTransport: Send + 'static\nwhere\n R: ServiceRole,\n E: std::error::Error + Send + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static;\n}\n\npub enum TransportAdapterIdentity {}\nimpl IntoTransport for T\nwhere\n T: Transport + Send + 'static,\n R: ServiceRole,\n E: std::error::Error + Send + 
Sync + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static {\n self\n }\n}\n\n/// A transport that can send a single message and then close itself\npub struct OneshotTransport\nwhere\n R: ServiceRole,\n{\n message: Option>,\n sender: tokio::sync::mpsc::Sender>,\n finished_signal: Arc,\n}\n\nimpl OneshotTransport\nwhere\n R: ServiceRole,\n{\n pub fn new(\n message: RxJsonRpcMessage,\n ) -> (Self, tokio::sync::mpsc::Receiver>) {\n let (sender, receiver) = tokio::sync::mpsc::channel(16);\n (\n Self {\n message: Some(message),\n sender,\n finished_signal: Arc::new(tokio::sync::Notify::new()),\n },\n receiver,\n )\n }\n}\n\nimpl Transport for OneshotTransport\nwhere\n R: ServiceRole,\n{\n type Error = tokio::sync::mpsc::error::SendError>;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let sender = self.sender.clone();\n let terminate = matches!(item, TxJsonRpcMessage::::Response(_));\n let signal = self.finished_signal.clone();\n async move {\n sender.send(item).await?;\n if terminate {\n signal.notify_waiters();\n }\n Ok(())\n }\n }\n\n async fn receive(&mut self) -> Option> {\n if self.message.is_none() {\n self.finished_signal.notified().await;\n }\n self.message.take()\n }\n\n fn close(&mut self) -> impl Future> + Send {\n self.message.take();\n std::future::ready(Ok(()))\n }\n}\n\n#[derive(Debug, thiserror::Error)]\n#[error(\"Transport [{transport_name}] error: {error}\")]\npub struct DynamicTransportError {\n pub transport_name: Cow<'static, str>,\n pub transport_type_id: std::any::TypeId,\n #[source]\n pub error: Box,\n}\n\nimpl DynamicTransportError {\n pub fn new + 'static, R: ServiceRole>(e: T::Error) -> Self {\n Self {\n transport_name: T::name(),\n transport_type_id: std::any::TypeId::of::(),\n error: Box::new(e),\n }\n }\n pub fn downcast + 'static, R: ServiceRole>(self) -> Result {\n if !self.is::() {\n Err(self)\n } else {\n Ok(self\n .error\n .downcast::()\n .map(|e| *e)\n .expect(\"type is 
checked\"))\n }\n }\n pub fn is + 'static, R: ServiceRole>(&self) -> bool {\n self.error.is::() && self.transport_type_id == std::any::TypeId::of::()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/service/client.rs", "use std::borrow::Cow;\n\nuse thiserror::Error;\n\nuse super::*;\nuse crate::{\n model::{\n CallToolRequest, CallToolRequestParam, CallToolResult, CancelledNotification,\n CancelledNotificationParam, ClientInfo, ClientJsonRpcMessage, ClientNotification,\n ClientRequest, ClientResult, CompleteRequest, CompleteRequestParam, CompleteResult,\n GetPromptRequest, GetPromptRequestParam, GetPromptResult, InitializeRequest,\n InitializedNotification, JsonRpcResponse, ListPromptsRequest, ListPromptsResult,\n ListResourceTemplatesRequest, ListResourceTemplatesResult, ListResourcesRequest,\n ListResourcesResult, ListToolsRequest, ListToolsResult, PaginatedRequestParam,\n ProgressNotification, ProgressNotificationParam, ReadResourceRequest,\n ReadResourceRequestParam, ReadResourceResult, RequestId, RootsListChangedNotification,\n ServerInfo, ServerJsonRpcMessage, ServerNotification, ServerRequest, ServerResult,\n SetLevelRequest, SetLevelRequestParam, SubscribeRequest, SubscribeRequestParam,\n UnsubscribeRequest, UnsubscribeRequestParam,\n },\n transport::DynamicTransportError,\n};\n\n/// It represents the error that may occur when serving the client.\n///\n/// if you want to handle the error, you can use `serve_client_with_ct` or `serve_client` with `Result, ClientError>`\n#[derive(Error, Debug)]\npub enum ClientInitializeError {\n #[error(\"expect initialized response, but received: {0:?}\")]\n ExpectedInitResponse(Option),\n\n #[error(\"expect initialized result, but received: {0:?}\")]\n ExpectedInitResult(Option),\n\n #[error(\"conflict initialized response id: expected {0}, got {1}\")]\n ConflictInitResponseId(RequestId, RequestId),\n\n #[error(\"connection closed: {0}\")]\n ConnectionClosed(String),\n\n #[error(\"Send message error {error}, when {context}\")]\n 
TransportError {\n error: DynamicTransportError,\n context: Cow<'static, str>,\n },\n\n #[error(\"Cancelled\")]\n Cancelled,\n}\n\nimpl ClientInitializeError {\n pub fn transport + 'static>(\n error: T::Error,\n context: impl Into>,\n ) -> Self {\n Self::TransportError {\n error: DynamicTransportError::new::(error),\n context: context.into(),\n }\n }\n}\n\n/// Helper function to get the next message from the stream\nasync fn expect_next_message(\n transport: &mut T,\n context: &str,\n) -> Result\nwhere\n T: Transport,\n{\n transport\n .receive()\n .await\n .ok_or_else(|| ClientInitializeError::ConnectionClosed(context.to_string()))\n}\n\n/// Helper function to expect a response from the stream\nasync fn expect_response(\n transport: &mut T,\n context: &str,\n) -> Result<(ServerResult, RequestId), ClientInitializeError>\nwhere\n T: Transport,\n{\n let msg = expect_next_message(transport, context).await?;\n\n match msg {\n ServerJsonRpcMessage::Response(JsonRpcResponse { id, result, .. }) => Ok((result, id)),\n _ => Err(ClientInitializeError::ExpectedInitResponse(Some(msg))),\n }\n}\n\n#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]\npub struct RoleClient;\n\nimpl ServiceRole for RoleClient {\n type Req = ClientRequest;\n type Resp = ClientResult;\n type Not = ClientNotification;\n type PeerReq = ServerRequest;\n type PeerResp = ServerResult;\n type PeerNot = ServerNotification;\n type Info = ClientInfo;\n type PeerInfo = ServerInfo;\n type InitializeError = ClientInitializeError;\n const IS_CLIENT: bool = true;\n}\n\npub type ServerSink = Peer;\n\nimpl> ServiceExt for S {\n fn serve_with_ct(\n self,\n transport: T,\n ct: CancellationToken,\n ) -> impl Future, ClientInitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized,\n {\n serve_client_with_ct(self, transport, ct)\n }\n}\n\npub async fn serve_client(\n service: S,\n transport: T,\n) -> Result, ClientInitializeError>\nwhere\n S: Service,\n T: 
IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n serve_client_with_ct(service, transport, Default::default()).await\n}\n\npub async fn serve_client_with_ct(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ClientInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n tokio::select! {\n result = serve_client_with_ct_inner(service, transport.into_transport(), ct.clone()) => { result }\n _ = ct.cancelled() => {\n Err(ClientInitializeError::Cancelled)\n }\n }\n}\n\nasync fn serve_client_with_ct_inner(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ClientInitializeError>\nwhere\n S: Service,\n T: Transport + 'static,\n{\n let mut transport = transport.into_transport();\n let id_provider = >::default();\n\n // service\n let id = id_provider.next_request_id();\n let init_request = InitializeRequest {\n method: Default::default(),\n params: service.get_info(),\n extensions: Default::default(),\n };\n transport\n .send(ClientJsonRpcMessage::request(\n ClientRequest::InitializeRequest(init_request),\n id.clone(),\n ))\n .await\n .map_err(|error| ClientInitializeError::TransportError {\n error: DynamicTransportError::new::(error),\n context: \"send initialize request\".into(),\n })?;\n\n let (response, response_id) = expect_response(&mut transport, \"initialize response\").await?;\n\n if id != response_id {\n return Err(ClientInitializeError::ConflictInitResponseId(\n id,\n response_id,\n ));\n }\n\n let ServerResult::InitializeResult(initialize_result) = response else {\n return Err(ClientInitializeError::ExpectedInitResult(Some(response)));\n };\n\n // send notification\n let notification = ClientJsonRpcMessage::notification(\n ClientNotification::InitializedNotification(InitializedNotification {\n method: Default::default(),\n extensions: Default::default(),\n }),\n );\n transport.send(notification).await.map_err(|error| {\n 
ClientInitializeError::transport::(error, \"send initialized notification\")\n })?;\n let (peer, peer_rx) = Peer::new(id_provider, Some(initialize_result));\n Ok(serve_inner(service, transport, peer, peer_rx, ct))\n}\n\nmacro_rules! method {\n (peer_req $method:ident $Req:ident() => $Resp: ident ) => {\n pub async fn $method(&self) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident) => $Resp: ident ) => {\n pub async fn $method(&self, params: $Param) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident)? 
=> $Resp: ident ) => {\n pub async fn $method(&self, params: Option<$Param>) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident)) => {\n pub async fn $method(&self, params: $Param) -> Result<(), ServiceError> {\n let result = self\n .send_request(ClientRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ServerResult::EmptyResult(_) => Ok(()),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n\n (peer_not $method:ident $Not:ident($Param: ident)) => {\n pub async fn $method(&self, params: $Param) -> Result<(), ServiceError> {\n self.send_notification(ClientNotification::$Not($Not {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n (peer_not $method:ident $Not:ident) => {\n pub async fn $method(&self) -> Result<(), ServiceError> {\n self.send_notification(ClientNotification::$Not($Not {\n method: Default::default(),\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n}\n\nimpl Peer {\n method!(peer_req complete CompleteRequest(CompleteRequestParam) => CompleteResult);\n method!(peer_req set_level SetLevelRequest(SetLevelRequestParam));\n method!(peer_req get_prompt GetPromptRequest(GetPromptRequestParam) => GetPromptResult);\n method!(peer_req list_prompts ListPromptsRequest(PaginatedRequestParam)? => ListPromptsResult);\n method!(peer_req list_resources ListResourcesRequest(PaginatedRequestParam)? => ListResourcesResult);\n method!(peer_req list_resource_templates ListResourceTemplatesRequest(PaginatedRequestParam)? 
=> ListResourceTemplatesResult);\n method!(peer_req read_resource ReadResourceRequest(ReadResourceRequestParam) => ReadResourceResult);\n method!(peer_req subscribe SubscribeRequest(SubscribeRequestParam) );\n method!(peer_req unsubscribe UnsubscribeRequest(UnsubscribeRequestParam));\n method!(peer_req call_tool CallToolRequest(CallToolRequestParam) => CallToolResult);\n method!(peer_req list_tools ListToolsRequest(PaginatedRequestParam)? => ListToolsResult);\n\n method!(peer_not notify_cancelled CancelledNotification(CancelledNotificationParam));\n method!(peer_not notify_progress ProgressNotification(ProgressNotificationParam));\n method!(peer_not notify_initialized InitializedNotification);\n method!(peer_not notify_roots_list_changed RootsListChangedNotification);\n}\n\nimpl Peer {\n /// A wrapper method for [`Peer::list_tools`].\n ///\n /// This function will call [`Peer::list_tools`] multiple times until all tools are listed.\n pub async fn list_all_tools(&self) -> Result, ServiceError> {\n let mut tools = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_tools(Some(PaginatedRequestParam { cursor }))\n .await?;\n tools.extend(result.tools);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(tools)\n }\n\n /// A wrapper method for [`Peer::list_prompts`].\n ///\n /// This function will call [`Peer::list_prompts`] multiple times until all prompts are listed.\n pub async fn list_all_prompts(&self) -> Result, ServiceError> {\n let mut prompts = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_prompts(Some(PaginatedRequestParam { cursor }))\n .await?;\n prompts.extend(result.prompts);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(prompts)\n }\n\n /// A wrapper method for [`Peer::list_resources`].\n ///\n /// This function will call [`Peer::list_resources`] multiple times until all resources are listed.\n pub async fn list_all_resources(&self) -> Result, 
ServiceError> {\n let mut resources = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_resources(Some(PaginatedRequestParam { cursor }))\n .await?;\n resources.extend(result.resources);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(resources)\n }\n\n /// A wrapper method for [`Peer::list_resource_templates`].\n ///\n /// This function will call [`Peer::list_resource_templates`] multiple times until all resource templates are listed.\n pub async fn list_all_resource_templates(\n &self,\n ) -> Result, ServiceError> {\n let mut resource_templates = Vec::new();\n let mut cursor = None;\n loop {\n let result = self\n .list_resource_templates(Some(PaginatedRequestParam { cursor }))\n .await?;\n resource_templates.extend(result.resource_templates);\n cursor = result.next_cursor;\n if cursor.is_none() {\n break;\n }\n }\n Ok(resource_templates)\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/service/server.rs", "use std::borrow::Cow;\n\nuse thiserror::Error;\n\nuse super::*;\nuse crate::{\n model::{\n CancelledNotification, CancelledNotificationParam, ClientInfo, ClientJsonRpcMessage,\n ClientNotification, ClientRequest, ClientResult, CreateMessageRequest,\n CreateMessageRequestParam, CreateMessageResult, ErrorData, ListRootsRequest,\n ListRootsResult, LoggingMessageNotification, LoggingMessageNotificationParam,\n ProgressNotification, ProgressNotificationParam, PromptListChangedNotification,\n ProtocolVersion, ResourceListChangedNotification, ResourceUpdatedNotification,\n ResourceUpdatedNotificationParam, ServerInfo, ServerNotification, ServerRequest,\n ServerResult, ToolListChangedNotification,\n },\n transport::DynamicTransportError,\n};\n\n#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]\npub struct RoleServer;\n\nimpl ServiceRole for RoleServer {\n type Req = ServerRequest;\n type Resp = ServerResult;\n type Not = ServerNotification;\n type PeerReq = ClientRequest;\n type PeerResp = ClientResult;\n type PeerNot = 
ClientNotification;\n type Info = ServerInfo;\n type PeerInfo = ClientInfo;\n\n type InitializeError = ServerInitializeError;\n const IS_CLIENT: bool = false;\n}\n\n/// It represents the error that may occur when serving the server.\n///\n/// if you want to handle the error, you can use `serve_server_with_ct` or `serve_server` with `Result, ServerError>`\n#[derive(Error, Debug)]\npub enum ServerInitializeError {\n #[error(\"expect initialized request, but received: {0:?}\")]\n ExpectedInitializeRequest(Option),\n\n #[error(\"expect initialized notification, but received: {0:?}\")]\n ExpectedInitializedNotification(Option),\n\n #[error(\"connection closed: {0}\")]\n ConnectionClosed(String),\n\n #[error(\"unexpected initialize result: {0:?}\")]\n UnexpectedInitializeResponse(ServerResult),\n\n #[error(\"initialize failed: {0}\")]\n InitializeFailed(ErrorData),\n\n #[error(\"unsupported protocol version: {0}\")]\n UnsupportedProtocolVersion(ProtocolVersion),\n\n #[error(\"Send message error {error}, when {context}\")]\n TransportError {\n error: DynamicTransportError,\n context: Cow<'static, str>,\n },\n\n #[error(\"Cancelled\")]\n Cancelled,\n}\n\nimpl ServerInitializeError {\n pub fn transport + 'static>(\n error: T::Error,\n context: impl Into>,\n ) -> Self {\n Self::TransportError {\n error: DynamicTransportError::new::(error),\n context: context.into(),\n }\n }\n}\npub type ClientSink = Peer;\n\nimpl> ServiceExt for S {\n fn serve_with_ct(\n self,\n transport: T,\n ct: CancellationToken,\n ) -> impl Future, ServerInitializeError>> + Send\n where\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n Self: Sized,\n {\n serve_server_with_ct(self, transport, ct)\n }\n}\n\npub async fn serve_server(\n service: S,\n transport: T,\n) -> Result, ServerInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n serve_server_with_ct(service, transport, CancellationToken::new()).await\n}\n\n/// Helper 
function to get the next message from the stream\nasync fn expect_next_message(\n transport: &mut T,\n context: &str,\n) -> Result\nwhere\n T: Transport,\n{\n transport\n .receive()\n .await\n .ok_or_else(|| ServerInitializeError::ConnectionClosed(context.to_string()))\n}\n\n/// Helper function to expect a request from the stream\nasync fn expect_request(\n transport: &mut T,\n context: &str,\n) -> Result<(ClientRequest, RequestId), ServerInitializeError>\nwhere\n T: Transport,\n{\n let msg = expect_next_message(transport, context).await?;\n let msg_clone = msg.clone();\n msg.into_request()\n .ok_or(ServerInitializeError::ExpectedInitializeRequest(Some(\n msg_clone,\n )))\n}\n\n/// Helper function to expect a notification from the stream\nasync fn expect_notification(\n transport: &mut T,\n context: &str,\n) -> Result\nwhere\n T: Transport,\n{\n let msg = expect_next_message(transport, context).await?;\n let msg_clone = msg.clone();\n msg.into_notification()\n .ok_or(ServerInitializeError::ExpectedInitializedNotification(\n Some(msg_clone),\n ))\n}\n\npub async fn serve_server_with_ct(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ServerInitializeError>\nwhere\n S: Service,\n T: IntoTransport,\n E: std::error::Error + Send + Sync + 'static,\n{\n tokio::select! 
{\n result = serve_server_with_ct_inner(service, transport.into_transport(), ct.clone()) => { result }\n _ = ct.cancelled() => {\n Err(ServerInitializeError::Cancelled)\n }\n }\n}\n\nasync fn serve_server_with_ct_inner(\n service: S,\n transport: T,\n ct: CancellationToken,\n) -> Result, ServerInitializeError>\nwhere\n S: Service,\n T: Transport + 'static,\n{\n let mut transport = transport.into_transport();\n let id_provider = >::default();\n\n // Get initialize request\n let (request, id) = expect_request(&mut transport, \"initialized request\").await?;\n\n let ClientRequest::InitializeRequest(peer_info) = &request else {\n return Err(ServerInitializeError::ExpectedInitializeRequest(Some(\n ClientJsonRpcMessage::request(request, id),\n )));\n };\n let (peer, peer_rx) = Peer::new(id_provider, Some(peer_info.params.clone()));\n let context = RequestContext {\n ct: ct.child_token(),\n id: id.clone(),\n meta: request.get_meta().clone(),\n extensions: request.extensions().clone(),\n peer: peer.clone(),\n };\n // Send initialize response\n let init_response = service.handle_request(request.clone(), context).await;\n let mut init_response = match init_response {\n Ok(ServerResult::InitializeResult(init_response)) => init_response,\n Ok(result) => {\n return Err(ServerInitializeError::UnexpectedInitializeResponse(result));\n }\n Err(e) => {\n transport\n .send(ServerJsonRpcMessage::error(e.clone(), id))\n .await\n .map_err(|error| {\n ServerInitializeError::transport::(error, \"sending error response\")\n })?;\n return Err(ServerInitializeError::InitializeFailed(e));\n }\n };\n let peer_protocol_version = peer_info.params.protocol_version.clone();\n let protocol_version = match peer_protocol_version\n .partial_cmp(&init_response.protocol_version)\n .ok_or(ServerInitializeError::UnsupportedProtocolVersion(\n peer_protocol_version,\n ))? 
{\n std::cmp::Ordering::Less => peer_info.params.protocol_version.clone(),\n _ => init_response.protocol_version,\n };\n init_response.protocol_version = protocol_version;\n transport\n .send(ServerJsonRpcMessage::response(\n ServerResult::InitializeResult(init_response),\n id,\n ))\n .await\n .map_err(|error| {\n ServerInitializeError::transport::(error, \"sending initialize response\")\n })?;\n\n // Wait for initialize notification\n let notification = expect_notification(&mut transport, \"initialize notification\").await?;\n let ClientNotification::InitializedNotification(_) = notification else {\n return Err(ServerInitializeError::ExpectedInitializedNotification(\n Some(ClientJsonRpcMessage::notification(notification)),\n ));\n };\n let context = NotificationContext {\n meta: notification.get_meta().clone(),\n extensions: notification.extensions().clone(),\n peer: peer.clone(),\n };\n let _ = service.handle_notification(notification, context).await;\n // Continue processing service\n Ok(serve_inner(service, transport, peer, peer_rx, ct))\n}\n\nmacro_rules! 
method {\n (peer_req $method:ident $Req:ident() => $Resp: ident ) => {\n pub async fn $method(&self) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ServerRequest::$Req($Req {\n method: Default::default(),\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ClientResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident) => $Resp: ident ) => {\n pub async fn $method(&self, params: $Param) -> Result<$Resp, ServiceError> {\n let result = self\n .send_request(ServerRequest::$Req($Req {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n match result {\n ClientResult::$Resp(result) => Ok(result),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n };\n (peer_req $method:ident $Req:ident($Param: ident)) => {\n pub fn $method(\n &self,\n params: $Param,\n ) -> impl Future> + Send + '_ {\n async move {\n let result = self\n .send_request(ServerRequest::$Req($Req {\n method: Default::default(),\n params,\n }))\n .await?;\n match result {\n ClientResult::EmptyResult(_) => Ok(()),\n _ => Err(ServiceError::UnexpectedResponse),\n }\n }\n }\n };\n\n (peer_not $method:ident $Not:ident($Param: ident)) => {\n pub async fn $method(&self, params: $Param) -> Result<(), ServiceError> {\n self.send_notification(ServerNotification::$Not($Not {\n method: Default::default(),\n params,\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n (peer_not $method:ident $Not:ident) => {\n pub async fn $method(&self) -> Result<(), ServiceError> {\n self.send_notification(ServerNotification::$Not($Not {\n method: Default::default(),\n extensions: Default::default(),\n }))\n .await?;\n Ok(())\n }\n };\n}\n\nimpl Peer {\n method!(peer_req create_message CreateMessageRequest(CreateMessageRequestParam) => CreateMessageResult);\n method!(peer_req list_roots ListRootsRequest() => ListRootsResult);\n\n method!(peer_not 
notify_cancelled CancelledNotification(CancelledNotificationParam));\n method!(peer_not notify_progress ProgressNotification(ProgressNotificationParam));\n method!(peer_not notify_logging_message LoggingMessageNotification(LoggingMessageNotificationParam));\n method!(peer_not notify_resource_updated ResourceUpdatedNotification(ResourceUpdatedNotificationParam));\n method!(peer_not notify_resource_list_changed ResourceListChangedNotification);\n method!(peer_not notify_tool_list_changed ToolListChangedNotification);\n method!(peer_not notify_prompt_list_changed PromptListChangedNotification);\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/async_rw.rs", "use std::{marker::PhantomData, sync::Arc};\n\n// use crate::schema::*;\nuse futures::{SinkExt, StreamExt};\nuse serde::{Serialize, de::DeserializeOwned};\nuse thiserror::Error;\nuse tokio::{\n io::{AsyncRead, AsyncWrite},\n sync::Mutex,\n};\nuse tokio_util::{\n bytes::{Buf, BufMut, BytesMut},\n codec::{Decoder, Encoder, FramedRead, FramedWrite},\n};\n\nuse super::{IntoTransport, Transport};\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\npub enum TransportAdapterAsyncRW {}\n\nimpl IntoTransport for (R, W)\nwhere\n Role: ServiceRole,\n R: AsyncRead + Send + 'static + Unpin,\n W: AsyncWrite + Send + 'static + Unpin,\n{\n fn into_transport(self) -> impl Transport + 'static {\n AsyncRwTransport::new(self.0, self.1)\n }\n}\n\npub enum TransportAdapterAsyncCombinedRW {}\nimpl IntoTransport for S\nwhere\n Role: ServiceRole,\n S: AsyncRead + AsyncWrite + Send + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static {\n IntoTransport::::into_transport(\n tokio::io::split(self),\n )\n }\n}\n\npub struct AsyncRwTransport {\n read: FramedRead>>,\n write: Arc>>>>,\n}\n\nimpl AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n pub fn new(read: R, write: W) -> Self {\n let read = FramedRead::new(\n read,\n JsonRpcMessageCodec::>::default(),\n 
);\n let write = Arc::new(Mutex::new(FramedWrite::new(\n write,\n JsonRpcMessageCodec::>::default(),\n )));\n Self { read, write }\n }\n}\n\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\nimpl AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n pub fn new_client(read: R, write: W) -> Self {\n Self::new(read, write)\n }\n}\n\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\nimpl AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n pub fn new_server(read: R, write: W) -> Self {\n Self::new(read, write)\n }\n}\n\nimpl Transport for AsyncRwTransport\nwhere\n R: Send + AsyncRead + Unpin,\n W: Send + AsyncWrite + Unpin + 'static,\n{\n type Error = std::io::Error;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let lock = self.write.clone();\n async move {\n let mut write = lock.lock().await;\n write.send(item).await.map_err(Into::into)\n }\n }\n\n fn receive(&mut self) -> impl Future>> {\n let next = self.read.next();\n async {\n next.await.and_then(|e| {\n e.inspect_err(|e| {\n tracing::error!(\"Error reading from stream: {}\", e);\n })\n .ok()\n })\n }\n }\n\n async fn close(&mut self) -> Result<(), Self::Error> {\n Ok(())\n }\n}\n\n#[derive(Debug, Clone)]\npub struct JsonRpcMessageCodec {\n _marker: PhantomData T>,\n next_index: usize,\n max_length: usize,\n is_discarding: bool,\n}\n\nimpl Default for JsonRpcMessageCodec {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl JsonRpcMessageCodec {\n pub fn new() -> Self {\n Self {\n _marker: PhantomData,\n next_index: 0,\n max_length: usize::MAX,\n is_discarding: false,\n }\n }\n\n pub fn new_with_max_length(max_length: usize) -> Self {\n Self {\n max_length,\n ..Self::new()\n }\n }\n\n pub fn max_length(&self) -> usize {\n self.max_length\n }\n}\n\nfn without_carriage_return(s: &[u8]) -> &[u8] {\n if let 
Some(&b'\\r') = s.last() {\n &s[..s.len() - 1]\n } else {\n s\n }\n}\n\n/// Check if a notification method is a standard MCP notification\n/// should update this when MCP spec is updated about new notifications\nfn is_standard_notification(method: &str) -> bool {\n matches!(\n method,\n \"notifications/cancelled\"\n | \"notifications/initialized\"\n | \"notifications/message\"\n | \"notifications/progress\"\n | \"notifications/prompts/list_changed\"\n | \"notifications/resources/list_changed\"\n | \"notifications/resources/updated\"\n | \"notifications/roots/list_changed\"\n | \"notifications/tools/list_changed\"\n )\n}\n\n/// Try to parse a message with compatibility handling for non-standard notifications\nfn try_parse_with_compatibility(\n line: &[u8],\n context: &str,\n) -> Result, JsonRpcMessageCodecError> {\n if let Ok(line_str) = std::str::from_utf8(line) {\n match serde_json::from_slice(line) {\n Ok(item) => Ok(Some(item)),\n Err(e) => {\n // Check if this is a non-standard notification that should be ignored\n if line_str.contains(\"\\\"method\\\":\\\"notifications/\") {\n // Extract the method name to check if it's standard\n if let Ok(json_value) = serde_json::from_str::(line_str) {\n if let Some(method) = json_value.get(\"method\").and_then(|m| m.as_str()) {\n if method.starts_with(\"notifications/\")\n && !is_standard_notification(method)\n {\n tracing::debug!(\n \"Ignoring non-standard notification {} {}: {}\",\n method,\n context,\n line_str\n );\n return Ok(None); // Skip this message\n }\n }\n }\n }\n\n tracing::debug!(\n \"Failed to parse message {}: {} | Error: {}\",\n context,\n line_str,\n e\n );\n Err(JsonRpcMessageCodecError::Serde(e))\n }\n }\n } else {\n serde_json::from_slice(line)\n .map(Some)\n .map_err(JsonRpcMessageCodecError::Serde)\n }\n}\n\n#[derive(Debug, Error)]\npub enum JsonRpcMessageCodecError {\n #[error(\"max line length exceeded\")]\n MaxLineLengthExceeded,\n #[error(\"serde error {0}\")]\n Serde(#[from] 
serde_json::Error),\n #[error(\"io error {0}\")]\n Io(#[from] std::io::Error),\n}\n\nimpl From for std::io::Error {\n fn from(value: JsonRpcMessageCodecError) -> Self {\n match value {\n JsonRpcMessageCodecError::MaxLineLengthExceeded => {\n std::io::Error::new(std::io::ErrorKind::InvalidData, value)\n }\n JsonRpcMessageCodecError::Serde(e) => e.into(),\n JsonRpcMessageCodecError::Io(e) => e,\n }\n }\n}\n\nimpl Decoder for JsonRpcMessageCodec {\n type Item = T;\n\n type Error = JsonRpcMessageCodecError;\n\n fn decode(\n &mut self,\n buf: &mut BytesMut,\n ) -> Result, JsonRpcMessageCodecError> {\n loop {\n // Determine how far into the buffer we'll search for a newline. If\n // there's no max_length set, we'll read to the end of the buffer.\n let read_to = std::cmp::min(self.max_length.saturating_add(1), buf.len());\n\n let newline_offset = buf[self.next_index..read_to]\n .iter()\n .position(|b| *b == b'\\n');\n\n match (self.is_discarding, newline_offset) {\n (true, Some(offset)) => {\n // If we found a newline, discard up to that offset and\n // then stop discarding. On the next iteration, we'll try\n // to read a line normally.\n buf.advance(offset + self.next_index + 1);\n self.is_discarding = false;\n self.next_index = 0;\n }\n (true, None) => {\n // Otherwise, we didn't find a newline, so we'll discard\n // everything we read. On the next iteration, we'll continue\n // discarding up to max_len bytes unless we find a newline.\n buf.advance(read_to);\n self.next_index = 0;\n if buf.is_empty() {\n return Ok(None);\n }\n }\n (false, Some(offset)) => {\n // Found a line!\n let newline_index = offset + self.next_index;\n self.next_index = 0;\n let line = buf.split_to(newline_index + 1);\n let line = &line[..line.len() - 1];\n let line = without_carriage_return(line);\n\n // Use compatibility handling function\n let item = match try_parse_with_compatibility(line, \"decode\")? 
{\n Some(item) => item,\n None => return Ok(None), // Skip non-standard message\n };\n return Ok(Some(item));\n }\n (false, None) if buf.len() > self.max_length => {\n // Reached the maximum length without finding a\n // newline, return an error and start discarding on the\n // next call.\n self.is_discarding = true;\n return Err(JsonRpcMessageCodecError::MaxLineLengthExceeded);\n }\n (false, None) => {\n // We didn't find a line or reach the length limit, so the next\n // call will resume searching at the current offset.\n self.next_index = read_to;\n return Ok(None);\n }\n }\n }\n }\n\n fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, JsonRpcMessageCodecError> {\n Ok(match self.decode(buf)? {\n Some(frame) => Some(frame),\n None => {\n self.next_index = 0;\n // No terminating newline - return remaining data, if any\n if buf.is_empty() || buf == &b\"\\r\"[..] {\n None\n } else {\n let line = buf.split_to(buf.len());\n let line = without_carriage_return(&line);\n\n // Use compatibility handling function\n let item = match try_parse_with_compatibility(line, \"decode_eof\")? 
{\n Some(item) => item,\n None => return Ok(None), // Skip non-standard message\n };\n Some(item)\n }\n }\n })\n }\n}\n\nimpl Encoder for JsonRpcMessageCodec {\n type Error = JsonRpcMessageCodecError;\n\n fn encode(&mut self, item: T, buf: &mut BytesMut) -> Result<(), JsonRpcMessageCodecError> {\n serde_json::to_writer(buf.writer(), &item)?;\n buf.put_u8(b'\\n');\n Ok(())\n }\n}\n\n#[cfg(test)]\nmod test {\n use futures::{Sink, Stream};\n\n use super::*;\n fn from_async_read(reader: R) -> impl Stream {\n FramedRead::new(reader, JsonRpcMessageCodec::::default()).filter_map(|result| {\n if let Err(e) = &result {\n tracing::error!(\"Error reading from stream: {}\", e);\n }\n futures::future::ready(result.ok())\n })\n }\n\n fn from_async_write(\n writer: W,\n ) -> impl Sink {\n FramedWrite::new(writer, JsonRpcMessageCodec::::default()).sink_map_err(Into::into)\n }\n #[tokio::test]\n async fn test_decode() {\n use futures::StreamExt;\n use tokio::io::BufReader;\n\n let data = r#\"{\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":1}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":2}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":3}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":4}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":5}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":6}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":7}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":8}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":9}\n {\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[23,42],\"id\":10}\n\n \"#;\n\n let mut cursor = BufReader::new(data.as_bytes());\n let mut stream = from_async_read::(&mut cursor);\n\n for i in 1..=10 {\n let item = stream.next().await.unwrap();\n assert_eq!(\n item,\n serde_json::json!({\n \"jsonrpc\": \"2.0\",\n 
\"method\": \"subtract\",\n \"params\": if i % 2 != 0 { [42, 23] } else { [23, 42] },\n \"id\": i,\n })\n );\n }\n }\n\n #[tokio::test]\n async fn test_encode() {\n let test_messages = vec![\n serde_json::json!({\n \"jsonrpc\": \"2.0\",\n \"method\": \"subtract\",\n \"params\": [42, 23],\n \"id\": 1,\n }),\n serde_json::json!({\n \"jsonrpc\": \"2.0\",\n \"method\": \"subtract\",\n \"params\": [23, 42],\n \"id\": 2,\n }),\n ];\n\n // Create a buffer to write to\n let mut buffer = Vec::new();\n let mut writer = from_async_write(&mut buffer);\n\n // Write the test messages\n for message in test_messages.iter() {\n writer.send(message.clone()).await.unwrap();\n }\n writer.close().await.unwrap();\n drop(writer);\n // Parse the buffer back into lines and check each one\n let output = String::from_utf8_lossy(&buffer);\n let mut lines = output.lines();\n\n for expected_message in test_messages {\n let line = lines.next().unwrap();\n let parsed_message: serde_json::Value = serde_json::from_str(line).unwrap();\n assert_eq!(parsed_message, expected_message);\n }\n\n // Make sure there are no extra lines\n assert!(lines.next().is_none());\n }\n\n #[test]\n fn test_standard_notification_check() {\n // Test that all standard notifications are recognized\n assert!(is_standard_notification(\"notifications/cancelled\"));\n assert!(is_standard_notification(\"notifications/initialized\"));\n assert!(is_standard_notification(\"notifications/progress\"));\n assert!(is_standard_notification(\n \"notifications/resources/list_changed\"\n ));\n assert!(is_standard_notification(\"notifications/resources/updated\"));\n assert!(is_standard_notification(\n \"notifications/prompts/list_changed\"\n ));\n assert!(is_standard_notification(\"notifications/tools/list_changed\"));\n assert!(is_standard_notification(\"notifications/message\"));\n assert!(is_standard_notification(\"notifications/roots/list_changed\"));\n\n // Test that non-standard notifications are not recognized\n 
assert!(!is_standard_notification(\"notifications/stderr\"));\n assert!(!is_standard_notification(\"notifications/custom\"));\n assert!(!is_standard_notification(\"notifications/debug\"));\n assert!(!is_standard_notification(\"some/other/method\"));\n }\n\n #[test]\n fn test_compatibility_function() {\n // Test the compatibility function directly\n let stderr_message =\n r#\"{\"method\":\"notifications/stderr\",\"params\":{\"content\":\"stderr message\"}}\"#;\n let custom_message = r#\"{\"method\":\"notifications/custom\",\"params\":{\"data\":\"custom\"}}\"#;\n let standard_message =\n r#\"{\"method\":\"notifications/message\",\"params\":{\"level\":\"info\",\"data\":\"standard\"}}\"#;\n let progress_message = r#\"{\"method\":\"notifications/progress\",\"params\":{\"progressToken\":\"token\",\"progress\":50}}\"#;\n\n // Test with valid JSON - all should parse successfully\n let result1 =\n try_parse_with_compatibility::(stderr_message.as_bytes(), \"test\");\n let result2 =\n try_parse_with_compatibility::(custom_message.as_bytes(), \"test\");\n let result3 =\n try_parse_with_compatibility::(standard_message.as_bytes(), \"test\");\n let result4 =\n try_parse_with_compatibility::(progress_message.as_bytes(), \"test\");\n\n // All should parse successfully since they're valid JSON\n assert!(result1.is_ok());\n assert!(result2.is_ok());\n assert!(result3.is_ok());\n assert!(result4.is_ok());\n\n // Standard notifications should return Some(value)\n assert!(result3.unwrap().is_some());\n assert!(result4.unwrap().is_some());\n\n println!(\"Standard notifications are preserved, non-standard are handled gracefully\");\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/tool.rs", "use std::{\n any::TypeId, borrow::Cow, collections::HashMap, future::Ready, marker::PhantomData, sync::Arc,\n};\n\nuse futures::future::{BoxFuture, FutureExt};\nuse schemars::{JsonSchema, transform::AddNullable};\nuse serde::{Deserialize, Serialize, de::DeserializeOwned};\nuse 
tokio_util::sync::CancellationToken;\n\npub use super::router::tool::{ToolRoute, ToolRouter};\nuse crate::{\n RoleServer,\n model::{CallToolRequestParam, CallToolResult, IntoContents, JsonObject},\n schemars::generate::SchemaSettings,\n service::RequestContext,\n};\n/// A shortcut for generating a JSON schema for a type.\npub fn schema_for_type() -> JsonObject {\n // explicitly to align json schema version to official specifications.\n // https://github.com/modelcontextprotocol/modelcontextprotocol/blob/main/schema/2025-03-26/schema.json\n // TODO: update to 2020-12 waiting for the mcp spec update\n let mut settings = SchemaSettings::draft07();\n settings.transforms = vec![Box::new(AddNullable::default())];\n let generator = settings.into_generator();\n let schema = generator.into_root_schema_for::();\n let object = serde_json::to_value(schema).expect(\"failed to serialize schema\");\n match object {\n serde_json::Value::Object(object) => object,\n _ => panic!(\"unexpected schema value\"),\n }\n}\n\n/// Call [`schema_for_type`] with a cache\npub fn cached_schema_for_type() -> Arc {\n thread_local! 
{\n static CACHE_FOR_TYPE: std::sync::RwLock>> = Default::default();\n };\n CACHE_FOR_TYPE.with(|cache| {\n if let Some(x) = cache\n .read()\n .expect(\"schema cache lock poisoned\")\n .get(&TypeId::of::())\n {\n x.clone()\n } else {\n let schema = schema_for_type::();\n let schema = Arc::new(schema);\n cache\n .write()\n .expect(\"schema cache lock poisoned\")\n .insert(TypeId::of::(), schema.clone());\n schema\n }\n })\n}\n\n/// Deserialize a JSON object into a type\npub fn parse_json_object(input: JsonObject) -> Result {\n serde_json::from_value(serde_json::Value::Object(input)).map_err(|e| {\n crate::ErrorData::invalid_params(\n format!(\"failed to deserialize parameters: {error}\", error = e),\n None,\n )\n })\n}\npub struct ToolCallContext<'s, S> {\n pub request_context: RequestContext,\n pub service: &'s S,\n pub name: Cow<'static, str>,\n pub arguments: Option,\n}\n\nimpl<'s, S> ToolCallContext<'s, S> {\n pub fn new(\n service: &'s S,\n CallToolRequestParam { name, arguments }: CallToolRequestParam,\n request_context: RequestContext,\n ) -> Self {\n Self {\n request_context,\n service,\n name,\n arguments,\n }\n }\n pub fn name(&self) -> &str {\n &self.name\n }\n pub fn request_context(&self) -> &RequestContext {\n &self.request_context\n }\n}\n\npub trait FromToolCallContextPart: Sized {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result;\n}\n\npub trait IntoCallToolResult {\n fn into_call_tool_result(self) -> Result;\n}\n\nimpl IntoCallToolResult for T {\n fn into_call_tool_result(self) -> Result {\n Ok(CallToolResult::success(self.into_contents()))\n }\n}\n\nimpl IntoCallToolResult for Result {\n fn into_call_tool_result(self) -> Result {\n match self {\n Ok(value) => Ok(CallToolResult::success(value.into_contents())),\n Err(error) => Ok(CallToolResult::error(error.into_contents())),\n }\n }\n}\n\nimpl IntoCallToolResult for Result {\n fn into_call_tool_result(self) -> Result {\n match self {\n Ok(value) => 
value.into_call_tool_result(),\n Err(error) => Err(error),\n }\n }\n}\n\npin_project_lite::pin_project! {\n #[project = IntoCallToolResultFutProj]\n pub enum IntoCallToolResultFut {\n Pending {\n #[pin]\n fut: F,\n _marker: PhantomData,\n },\n Ready {\n #[pin]\n result: Ready>,\n }\n }\n}\n\nimpl Future for IntoCallToolResultFut\nwhere\n F: Future,\n R: IntoCallToolResult,\n{\n type Output = Result;\n\n fn poll(\n self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n ) -> std::task::Poll {\n match self.project() {\n IntoCallToolResultFutProj::Pending { fut, _marker } => {\n fut.poll(cx).map(IntoCallToolResult::into_call_tool_result)\n }\n IntoCallToolResultFutProj::Ready { result } => result.poll(cx),\n }\n }\n}\n\nimpl IntoCallToolResult for Result {\n fn into_call_tool_result(self) -> Result {\n self\n }\n}\n\npub trait CallToolHandler {\n fn call(\n self,\n context: ToolCallContext<'_, S>,\n ) -> BoxFuture<'_, Result>;\n}\n\npub type DynCallToolHandler = dyn for<'s> Fn(ToolCallContext<'s, S>) -> BoxFuture<'s, Result>\n + Send\n + Sync;\n\n/// Parameter Extractor\n///\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(transparent)]\npub struct Parameters

(pub P);\n\nimpl JsonSchema for Parameters

{\n fn schema_name() -> Cow<'static, str> {\n P::schema_name()\n }\n\n fn json_schema(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {\n P::json_schema(generator)\n }\n}\n\nimpl FromToolCallContextPart for CancellationToken {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n Ok(context.request_context.ct.clone())\n }\n}\n\npub struct ToolName(pub Cow<'static, str>);\n\nimpl FromToolCallContextPart for ToolName {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n Ok(Self(context.name.clone()))\n }\n}\n\nimpl FromToolCallContextPart for Parameters

\nwhere\n P: DeserializeOwned,\n{\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let arguments = context.arguments.take().unwrap_or_default();\n let value: P =\n serde_json::from_value(serde_json::Value::Object(arguments)).map_err(|e| {\n crate::ErrorData::invalid_params(\n format!(\"failed to deserialize parameters: {error}\", error = e),\n None,\n )\n })?;\n Ok(Parameters(value))\n }\n}\n\nimpl FromToolCallContextPart for JsonObject {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let object = context.arguments.take().unwrap_or_default();\n Ok(object)\n }\n}\n\nimpl FromToolCallContextPart for crate::model::Extensions {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let extensions = context.request_context.extensions.clone();\n Ok(extensions)\n }\n}\n\npub struct Extension(pub T);\n\nimpl FromToolCallContextPart for Extension\nwhere\n T: Send + Sync + 'static + Clone,\n{\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let extension = context\n .request_context\n .extensions\n .get::()\n .cloned()\n .ok_or_else(|| {\n crate::ErrorData::invalid_params(\n format!(\"missing extension {}\", std::any::type_name::()),\n None,\n )\n })?;\n Ok(Extension(extension))\n }\n}\n\nimpl FromToolCallContextPart for crate::Peer {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let peer = context.request_context.peer.clone();\n Ok(peer)\n }\n}\n\nimpl FromToolCallContextPart for crate::model::Meta {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n let mut meta = crate::model::Meta::default();\n std::mem::swap(&mut meta, &mut context.request_context.meta);\n Ok(meta)\n }\n}\n\npub struct RequestId(pub crate::model::RequestId);\nimpl FromToolCallContextPart for RequestId {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n 
Ok(RequestId(context.request_context.id.clone()))\n }\n}\n\nimpl FromToolCallContextPart for RequestContext {\n fn from_tool_call_context_part(\n context: &mut ToolCallContext,\n ) -> Result {\n Ok(context.request_context.clone())\n }\n}\n\nimpl<'s, S> ToolCallContext<'s, S> {\n pub fn invoke(self, h: H) -> BoxFuture<'s, Result>\n where\n H: CallToolHandler,\n {\n h.call(self)\n }\n}\n#[allow(clippy::type_complexity)]\npub struct AsyncAdapter(PhantomData fn(Fut) -> R>);\npub struct SyncAdapter(PhantomData R>);\n// #[allow(clippy::type_complexity)]\npub struct AsyncMethodAdapter(PhantomData R>);\npub struct SyncMethodAdapter(PhantomData R>);\n\nmacro_rules! impl_for {\n ($($T: ident)*) => {\n impl_for!([] [$($T)*]);\n };\n // finished\n ([$($Tn: ident)*] []) => {\n impl_for!(@impl $($Tn)*);\n };\n ([$($Tn: ident)*] [$Tn_1: ident $($Rest: ident)*]) => {\n impl_for!(@impl $($Tn)*);\n impl_for!([$($Tn)* $Tn_1] [$($Rest)*]);\n };\n (@impl $($Tn: ident)*) => {\n impl<$($Tn,)* S, F, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart ,\n )*\n F: FnOnce(&S, $($Tn,)*) -> BoxFuture<'_, R>,\n\n // Need RTN support here(I guess), https://github.com/rust-lang/rust/pull/138424\n // Fut: Future + Send + 'a,\n R: IntoCallToolResult + Send + 'static,\n S: Send + Sync + 'static,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext<'_, S>,\n ) -> BoxFuture<'_, Result>{\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n let service = context.service;\n let fut = self(service, $($Tn,)*);\n async move {\n let result = fut.await;\n result.into_call_tool_result()\n }.boxed()\n }\n }\n\n impl<$($Tn,)* S, F, Fut, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart ,\n )*\n F: FnOnce($($Tn,)*) -> Fut + Send + ,\n Fut: Future + Send + 'static,\n R: IntoCallToolResult + 
Send + 'static,\n S: Send + Sync,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext,\n ) -> BoxFuture<'static, Result>{\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n let fut = self($($Tn,)*);\n async move {\n let result = fut.await;\n result.into_call_tool_result()\n }.boxed()\n }\n }\n\n impl<$($Tn,)* S, F, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart + ,\n )*\n F: FnOnce(&S, $($Tn,)*) -> R + Send + ,\n R: IntoCallToolResult + Send + ,\n S: Send + Sync,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext,\n ) -> BoxFuture<'static, Result> {\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n std::future::ready(self(context.service, $($Tn,)*).into_call_tool_result()).boxed()\n }\n }\n\n impl<$($Tn,)* S, F, R> CallToolHandler> for F\n where\n $(\n $Tn: FromToolCallContextPart + ,\n )*\n F: FnOnce($($Tn,)*) -> R + Send + ,\n R: IntoCallToolResult + Send + ,\n S: Send + Sync,\n {\n #[allow(unused_variables, non_snake_case, unused_mut)]\n fn call(\n self,\n mut context: ToolCallContext,\n ) -> BoxFuture<'static, Result> {\n $(\n let result = $Tn::from_tool_call_context_part(&mut context);\n let $Tn = match result {\n Ok(value) => value,\n Err(e) => return std::future::ready(Err(e)).boxed(),\n };\n )*\n std::future::ready(self($($Tn,)*).into_call_tool_result()).boxed()\n }\n }\n };\n}\nimpl_for!(T0 T1 T2 T3 T4 T5 T6 T7 T8 T9 T10 T11 T12 T13 T14 T15);\n"], ["/rust-sdk/crates/rmcp/src/transport/worker.rs", "use std::borrow::Cow;\n\nuse tokio_util::sync::CancellationToken;\nuse tracing::{Instrument, Level};\n\nuse super::{IntoTransport, Transport};\nuse 
crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\n#[derive(Debug, thiserror::Error)]\npub enum WorkerQuitReason {\n #[error(\"Join error {0}\")]\n Join(#[from] tokio::task::JoinError),\n #[error(\"Transport fatal {error}, when {context}\")]\n Fatal {\n error: Cow<'static, str>,\n context: Cow<'static, str>,\n },\n #[error(\"Transport canncelled\")]\n Cancelled,\n #[error(\"Transport closed\")]\n TransportClosed,\n #[error(\"Handler terminated\")]\n HandlerTerminated,\n}\n\nimpl WorkerQuitReason {\n pub fn fatal(msg: impl Into>, context: impl Into>) -> Self {\n Self::Fatal {\n error: msg.into(),\n context: context.into(),\n }\n }\n pub fn fatal_context(\n context: impl Into>,\n ) -> impl FnOnce(E) -> Self {\n |e| Self::Fatal {\n error: Cow::Owned(format!(\"{e}\")),\n context: context.into(),\n }\n }\n}\n\npub trait Worker: Sized + Send + 'static {\n type Error: std::error::Error + Send + Sync + 'static;\n type Role: ServiceRole;\n fn err_closed() -> Self::Error;\n fn err_join(e: tokio::task::JoinError) -> Self::Error;\n fn run(\n self,\n context: WorkerContext,\n ) -> impl Future> + Send;\n fn config(&self) -> WorkerConfig {\n WorkerConfig::default()\n }\n}\n\npub struct WorkerSendRequest {\n pub message: TxJsonRpcMessage,\n pub responder: tokio::sync::oneshot::Sender>,\n}\n\npub struct WorkerTransport {\n rx: tokio::sync::mpsc::Receiver>,\n send_service: tokio::sync::mpsc::Sender>,\n join_handle: Option>>,\n _drop_guard: tokio_util::sync::DropGuard,\n ct: CancellationToken,\n}\n\npub struct WorkerConfig {\n pub name: Option,\n pub channel_buffer_capacity: usize,\n}\n\nimpl Default for WorkerConfig {\n fn default() -> Self {\n Self {\n name: None,\n channel_buffer_capacity: 16,\n }\n }\n}\npub enum WorkerAdapter {}\n\nimpl IntoTransport for W {\n fn into_transport(self) -> impl Transport + 'static {\n WorkerTransport::spawn(self)\n }\n}\n\nimpl WorkerTransport {\n pub fn cancel_token(&self) -> CancellationToken {\n self.ct.clone()\n }\n pub fn 
spawn(worker: W) -> Self {\n Self::spawn_with_ct(worker, CancellationToken::new())\n }\n pub fn spawn_with_ct(worker: W, transport_task_ct: CancellationToken) -> Self {\n let config = worker.config();\n let worker_name = config.name;\n let (to_transport_tx, from_handler_rx) =\n tokio::sync::mpsc::channel::>(config.channel_buffer_capacity);\n let (to_handler_tx, from_transport_rx) =\n tokio::sync::mpsc::channel::>(config.channel_buffer_capacity);\n let context = WorkerContext {\n to_handler_tx,\n from_handler_rx,\n cancellation_token: transport_task_ct.clone(),\n };\n\n let join_handle = tokio::spawn(async move {\n worker\n .run(context)\n .instrument(tracing::span!(\n Level::TRACE,\n \"transport_worker\",\n name = worker_name,\n ))\n .await\n .inspect_err(|e| match e {\n WorkerQuitReason::Cancelled\n | WorkerQuitReason::TransportClosed\n | WorkerQuitReason::HandlerTerminated => {\n tracing::debug!(\"worker quit with reason: {:?}\", e);\n }\n WorkerQuitReason::Join(e) => {\n tracing::error!(\"worker quit with join error: {:?}\", e);\n }\n WorkerQuitReason::Fatal { error, context } => {\n tracing::error!(\"worker quit with fatal: {error}, when {context}\");\n }\n })\n .inspect(|_| {\n tracing::debug!(\"worker quit\");\n })\n });\n Self {\n rx: from_transport_rx,\n send_service: to_transport_tx,\n join_handle: Some(join_handle),\n ct: transport_task_ct.clone(),\n _drop_guard: transport_task_ct.drop_guard(),\n }\n }\n}\n\npub struct SendRequest {\n pub message: TxJsonRpcMessage,\n pub responder: tokio::sync::oneshot::Sender>,\n}\n\npub struct WorkerContext {\n pub to_handler_tx: tokio::sync::mpsc::Sender>,\n pub from_handler_rx: tokio::sync::mpsc::Receiver>,\n pub cancellation_token: CancellationToken,\n}\n\nimpl WorkerContext {\n pub async fn send_to_handler(\n &mut self,\n item: RxJsonRpcMessage,\n ) -> Result<(), WorkerQuitReason> {\n self.to_handler_tx\n .send(item)\n .await\n .map_err(|_| WorkerQuitReason::HandlerTerminated)\n }\n\n pub async fn 
recv_from_handler(&mut self) -> Result, WorkerQuitReason> {\n self.from_handler_rx\n .recv()\n .await\n .ok_or(WorkerQuitReason::HandlerTerminated)\n }\n}\n\nimpl Transport for WorkerTransport {\n type Error = W::Error;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n let tx = self.send_service.clone();\n let (responder, receiver) = tokio::sync::oneshot::channel();\n let request = WorkerSendRequest {\n message: item,\n responder,\n };\n async move {\n tx.send(request).await.map_err(|_| W::err_closed())?;\n receiver.await.map_err(|_| W::err_closed())??;\n Ok(())\n }\n }\n async fn receive(&mut self) -> Option> {\n self.rx.recv().await\n }\n async fn close(&mut self) -> Result<(), Self::Error> {\n if let Some(handle) = self.join_handle.take() {\n self.ct.cancel();\n let _quit_reason = handle.await.map_err(W::err_join)?;\n Ok(())\n } else {\n Ok(())\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/session/never.rs", "use futures::Stream;\nuse thiserror::Error;\n\nuse super::{ServerSseMessage, SessionId, SessionManager};\nuse crate::{\n RoleServer,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::Transport,\n};\n\n#[derive(Debug, Clone, Error)]\n#[error(\"Session management is not supported\")]\npub struct ErrorSessionManagementNotSupported;\n#[derive(Debug, Clone, Default)]\npub struct NeverSessionManager {}\npub enum NeverTransport {}\nimpl Transport for NeverTransport {\n type Error = ErrorSessionManagementNotSupported;\n\n fn send(\n &mut self,\n _item: ServerJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn receive(&mut self) -> impl Future> {\n futures::future::ready(None)\n }\n\n async fn close(&mut self) -> Result<(), Self::Error> {\n Err(ErrorSessionManagementNotSupported)\n }\n}\n\nimpl SessionManager for NeverSessionManager {\n type Error = ErrorSessionManagementNotSupported;\n type 
Transport = NeverTransport;\n\n fn create_session(\n &self,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn initialize_session(\n &self,\n _id: &SessionId,\n _message: ClientJsonRpcMessage,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn has_session(\n &self,\n _id: &SessionId,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn close_session(\n &self,\n _id: &SessionId,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n\n fn create_stream(\n &self,\n _id: &SessionId,\n _message: ClientJsonRpcMessage,\n ) -> impl Future<\n Output = Result + Send + 'static, Self::Error>,\n > + Send {\n futures::future::ready(Result::, _>::Err(\n ErrorSessionManagementNotSupported,\n ))\n }\n fn create_standalone_stream(\n &self,\n _id: &SessionId,\n ) -> impl Future<\n Output = Result + Send + 'static, Self::Error>,\n > + Send {\n futures::future::ready(Result::, _>::Err(\n ErrorSessionManagementNotSupported,\n ))\n }\n fn resume(\n &self,\n _id: &SessionId,\n _last_event_id: String,\n ) -> impl Future<\n Output = Result + Send + 'static, Self::Error>,\n > + Send {\n futures::future::ready(Result::, _>::Err(\n ErrorSessionManagementNotSupported,\n ))\n }\n fn accept_message(\n &self,\n _id: &SessionId,\n _message: ClientJsonRpcMessage,\n ) -> impl Future> + Send {\n futures::future::ready(Err(ErrorSessionManagementNotSupported))\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server/session.rs", "use futures::Stream;\n\npub use crate::transport::common::server_side_http::SessionId;\nuse crate::{\n RoleServer,\n model::{ClientJsonRpcMessage, ServerJsonRpcMessage},\n transport::common::server_side_http::ServerSseMessage,\n};\n\npub mod local;\npub mod never;\n\npub trait SessionManager: Send + Sync + 'static {\n type Error: std::error::Error + Send + 
'static;\n type Transport: crate::transport::Transport;\n /// Create a new session with the given id and configuration.\n fn create_session(\n &self,\n ) -> impl Future> + Send;\n fn initialize_session(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> impl Future> + Send;\n fn has_session(&self, id: &SessionId)\n -> impl Future> + Send;\n fn close_session(&self, id: &SessionId)\n -> impl Future> + Send;\n fn create_stream(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> impl Future<\n Output = Result + Send + Sync + 'static, Self::Error>,\n > + Send;\n fn accept_message(\n &self,\n id: &SessionId,\n message: ClientJsonRpcMessage,\n ) -> impl Future> + Send;\n fn create_standalone_stream(\n &self,\n id: &SessionId,\n ) -> impl Future<\n Output = Result + Send + Sync + 'static, Self::Error>,\n > + Send;\n fn resume(\n &self,\n id: &SessionId,\n last_event_id: String,\n ) -> impl Future<\n Output = Result + Send + Sync + 'static, Self::Error>,\n > + Send;\n}\n"], ["/rust-sdk/crates/rmcp/src/model.rs", "use std::{borrow::Cow, sync::Arc};\nmod annotated;\nmod capabilities;\nmod content;\nmod extension;\nmod meta;\nmod prompt;\nmod resource;\nmod serde_impl;\nmod tool;\npub use annotated::*;\npub use capabilities::*;\npub use content::*;\npub use extension::*;\npub use meta::*;\npub use prompt::*;\npub use resource::*;\nuse serde::{Deserialize, Serialize};\nuse serde_json::Value;\npub use tool::*;\n\n/// A JSON object type alias for convenient handling of JSON data.\n///\n/// You can use [`crate::object!`] or [`crate::model::object`] to create a json object quickly.\n/// This is commonly used for storing arbitrary JSON data in MCP messages.\npub type JsonObject = serde_json::Map;\n\n/// unwrap the JsonObject under [`serde_json::Value`]\n///\n/// # Panic\n/// This will panic when the value is not a object in debug mode.\npub fn object(value: serde_json::Value) -> JsonObject {\n debug_assert!(value.is_object());\n match value {\n 
serde_json::Value::Object(map) => map,\n _ => JsonObject::default(),\n }\n}\n\n/// Use this macro just like [`serde_json::json!`]\n#[cfg(feature = \"macros\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"macros\")))]\n#[macro_export]\nmacro_rules! object {\n ({$($tt:tt)*}) => {\n $crate::model::object(serde_json::json! {\n {$($tt)*}\n })\n };\n}\n\n/// This is commonly used for representing empty objects in MCP messages.\n///\n/// without returning any specific data.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Copy, Eq)]\n#[cfg_attr(feature = \"server\", derive(schemars::JsonSchema))]\npub struct EmptyObject {}\n\npub trait ConstString: Default {\n const VALUE: &str;\n}\n#[macro_export]\nmacro_rules! const_string {\n ($name:ident = $value:literal) => {\n #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]\n pub struct $name;\n\n impl ConstString for $name {\n const VALUE: &str = $value;\n }\n\n impl serde::Serialize for $name {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n $value.serialize(serializer)\n }\n }\n\n impl<'de> serde::Deserialize<'de> for $name {\n fn deserialize(deserializer: D) -> Result<$name, D::Error>\n where\n D: serde::Deserializer<'de>,\n {\n let s: String = serde::Deserialize::deserialize(deserializer)?;\n if s == $value {\n Ok($name)\n } else {\n Err(serde::de::Error::custom(format!(concat!(\n \"expect const string value \\\"\",\n $value,\n \"\\\"\"\n ))))\n }\n }\n }\n\n #[cfg(feature = \"schemars\")]\n impl schemars::JsonSchema for $name {\n fn schema_name() -> Cow<'static, str> {\n Cow::Borrowed(stringify!($name))\n }\n\n fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema {\n use serde_json::{Map, json};\n\n let mut schema_map = Map::new();\n schema_map.insert(\"type\".to_string(), json!(\"string\"));\n schema_map.insert(\"format\".to_string(), json!(\"const\"));\n schema_map.insert(\"const\".to_string(), json!($value));\n\n schemars::Schema::from(schema_map)\n }\n }\n 
};
}

const_string!(JsonRpcVersion2_0 = "2.0");

// =============================================================================
// CORE PROTOCOL TYPES
// =============================================================================

/// Represents the MCP protocol version used for communication.
///
/// This ensures compatibility between clients and servers by specifying
/// which version of the Model Context Protocol is being used.
#[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ProtocolVersion(Cow<'static, str>);

impl Default for ProtocolVersion {
    fn default() -> Self {
        Self::LATEST
    }
}

impl std::fmt::Display for ProtocolVersion {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}

impl ProtocolVersion {
    pub const V_2025_03_26: Self = Self(Cow::Borrowed("2025-03-26"));
    pub const V_2024_11_05: Self = Self(Cow::Borrowed("2024-11-05"));
    /// The most recent protocol revision this crate knows about.
    pub const LATEST: Self = Self::V_2025_03_26;
}

impl Serialize for ProtocolVersion {
    // NOTE(review): restored the stripped `<S>` generic and return type.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.0.serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for ProtocolVersion {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s: String = Deserialize::deserialize(deserializer)?;
        // Known versions reuse the borrowed constants (no allocation kept);
        // unknown versions are stored owned so forward compatibility works.
        #[allow(clippy::single_match)]
        match s.as_str() {
            "2024-11-05" => return Ok(ProtocolVersion::V_2024_11_05),
            "2025-03-26" => return Ok(ProtocolVersion::V_2025_03_26),
            _ => {}
        }
        Ok(ProtocolVersion(Cow::Owned(s)))
    }
}

/// A flexible identifier type that can be either a number or a string.
///
/// This is commonly used for request IDs and other identifiers in JSON-RPC
/// where the specification allows both numeric and string values.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum NumberOrString {
    /// A numeric identifier
    Number(u32),
    /// A string identifier
    String(Arc<str>),
}

impl NumberOrString {
    /// Convert the identifier into a raw `serde_json::Value`.
    pub fn into_json_value(self) -> Value {
        match self {
            NumberOrString::Number(n) => Value::Number(serde_json::Number::from(n)),
            NumberOrString::String(s) => Value::String(s.to_string()),
        }
    }
}

impl std::fmt::Display for NumberOrString {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            NumberOrString::Number(n) => n.fmt(f),
            NumberOrString::String(s) => s.fmt(f),
        }
    }
}

impl Serialize for NumberOrString {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            NumberOrString::Number(n) => n.serialize(serializer),
            NumberOrString::String(s) => s.serialize(serializer),
        }
    }
}

impl<'de> Deserialize<'de> for NumberOrString {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let value: Value = Deserialize::deserialize(deserializer)?;
        match value {
            Value::Number(n) => Ok(NumberOrString::Number(
                n.as_u64()
                    .ok_or(serde::de::Error::custom("Expect an integer"))? as u32,
            )),
            Value::String(s) => Ok(NumberOrString::String(s.into())),
            _ => Err(serde::de::Error::custom("Expect number or string")),
        }
    }
}

#[cfg(feature = "schemars")]
impl schemars::JsonSchema for NumberOrString {
    fn schema_name() -> Cow<'static, str> {
        Cow::Borrowed("NumberOrString")
    }

    fn json_schema(_: &mut schemars::SchemaGenerator) -> schemars::Schema {
        use serde_json::{Map, json};

        let mut number_schema = Map::new();
        number_schema.insert("type".to_string(), json!("number"));

        let mut string_schema = Map::new();
        string_schema.insert("type".to_string(), json!("string"));

        let mut schema_map = Map::new();
        schema_map.insert("oneOf".to_string(), json!([number_schema, string_schema]));

        schemars::Schema::from(schema_map)
    }
}

/// Type alias for request identifiers used in JSON-RPC communication.
pub type RequestId = NumberOrString;

/// A token used to track the progress of long-running operations.
///
/// Progress tokens allow clients and servers to associate progress notifications
/// with specific requests, enabling real-time updates on operation status.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Hash, Eq)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ProgressToken(pub NumberOrString);

// =============================================================================
// JSON-RPC MESSAGE STRUCTURES
// =============================================================================

/// Represents a JSON-RPC request with method, parameters, and extensions.
///
/// This is the core structure for all MCP requests, containing:
/// - `method`: The name of the method being called
/// - `params`: The parameters for the method
/// - `extensions`: Additional context data (similar to HTTP headers)
// NOTE(review): the `<M, P>` generic parameters were stripped by the text
// extraction; restored (`M` is the const method-name type, `P` the payload).
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Request<M, P> {
    pub method: M,
    pub params: P,
    /// extensions will carry anything possible in the context, including [`Meta`]
    ///
    /// this is similar with the Extensions in `http` crate
    #[cfg_attr(feature = "schemars", schemars(skip))]
    pub extensions: Extensions,
}

impl<M: Default, P> Request<M, P> {
    /// Build a request with the method filled in from its const type.
    pub fn new(params: P) -> Self {
        Self {
            method: Default::default(),
            params,
            extensions: Extensions::default(),
        }
    }
}

impl<M, P> GetExtensions for Request<M, P> {
    fn extensions(&self) -> &Extensions {
        &self.extensions
    }
    fn extensions_mut(&mut self) -> &mut Extensions {
        &mut self.extensions
    }
}

/// Like [`Request`], but the params payload is optional on the wire.
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RequestOptionalParam<M, P> {
    pub method: M,
    // #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<P>,
    /// extensions will carry anything possible in the context, including [`Meta`]
    ///
    /// this is similar with the Extensions in `http` crate
    #[cfg_attr(feature = "schemars", schemars(skip))]
    pub extensions: Extensions,
}

impl<M: Default, P> RequestOptionalParam<M, P> {
    pub fn with_param(params: P) -> Self {
        Self {
            method: Default::default(),
            params: Some(params),
            extensions: Extensions::default(),
        }
    }
}

/// Like [`Request`], but carrying no params at all.
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RequestNoParam<M> {
    pub method: M,
    /// extensions will carry anything possible in the context, including [`Meta`]
    ///
    /// this is similar with the Extensions in `http` crate
    #[cfg_attr(feature = "schemars", schemars(skip))]
    pub extensions: Extensions,
}

impl<M> GetExtensions for RequestNoParam<M> {
    fn extensions(&self) -> &Extensions {
        &self.extensions
    }
    fn extensions_mut(&mut self) -> &mut Extensions {
        &mut self.extensions
    }
}
/// A JSON-RPC notification: a method plus params, with no response expected.
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Notification<M, P> {
    pub method: M,
    pub params: P,
    /// extensions will carry anything possible in the context, including [`Meta`]
    ///
    /// this is similar with the Extensions in `http` crate
    #[cfg_attr(feature = "schemars", schemars(skip))]
    pub extensions: Extensions,
}

impl<M: Default, P> Notification<M, P> {
    pub fn new(params: P) -> Self {
        Self {
            method: Default::default(),
            params,
            extensions: Extensions::default(),
        }
    }
}

/// Like [`Notification`], but carrying no params.
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct NotificationNoParam<M> {
    pub method: M,
    /// extensions will carry anything possible in the context, including [`Meta`]
    ///
    /// this is similar with the Extensions in `http` crate
    #[cfg_attr(feature = "schemars", schemars(skip))]
    pub extensions: Extensions,
}

#[derive(Debug, Serialize, Deserialize, Clone,
PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct JsonRpcRequest<R> {
    pub jsonrpc: JsonRpcVersion2_0,
    pub id: RequestId,
    #[serde(flatten)]
    pub request: R,
}

type DefaultResponse = JsonObject;
// NOTE(review): the stripped generic was restored as `R = DefaultResponse`;
// the alias directly above strongly suggests it is the default result type.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct JsonRpcResponse<R = DefaultResponse> {
    pub jsonrpc: JsonRpcVersion2_0,
    pub id: RequestId,
    pub result: R,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct JsonRpcError {
    pub jsonrpc: JsonRpcVersion2_0,
    pub id: RequestId,
    pub error: ErrorData,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct JsonRpcNotification<N> {
    pub jsonrpc: JsonRpcVersion2_0,
    #[serde(flatten)]
    pub notification: N,
}

/// Standard JSON-RPC error codes used throughout the MCP protocol.
///
/// These codes follow the JSON-RPC 2.0 specification and provide
/// standardized error reporting across all MCP implementations.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ErrorCode(pub i32);

impl ErrorCode {
    pub const RESOURCE_NOT_FOUND: Self = Self(-32002);
    pub const INVALID_REQUEST: Self = Self(-32600);
    pub const METHOD_NOT_FOUND: Self = Self(-32601);
    pub const INVALID_PARAMS: Self = Self(-32602);
    pub const INTERNAL_ERROR: Self = Self(-32603);
    pub const PARSE_ERROR: Self = Self(-32700);
}

/// Error information for JSON-RPC error responses.
///
/// This structure follows the JSON-RPC 2.0 specification for error reporting,
/// providing a standardized way to communicate errors between clients and servers.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ErrorData {
    /// The error type that occurred (using standard JSON-RPC error codes)
    pub code: ErrorCode,

    /// A short description of the error. The message SHOULD be limited to a concise single sentence.
    pub message: Cow<'static, str>,

    /// Additional information about the error. The value of this member is defined by the
    /// sender (e.g. detailed error information, nested errors etc.).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}

impl ErrorData {
    pub fn new(
        code: ErrorCode,
        message: impl Into<Cow<'static, str>>,
        data: Option<Value>,
    ) -> Self {
        Self {
            code,
            message: message.into(),
            data,
        }
    }
    pub fn resource_not_found(message: impl Into<Cow<'static, str>>, data: Option<Value>) -> Self {
        Self::new(ErrorCode::RESOURCE_NOT_FOUND, message, data)
    }
    pub fn parse_error(message: impl Into<Cow<'static, str>>, data: Option<Value>) -> Self {
        Self::new(ErrorCode::PARSE_ERROR, message, data)
    }
    pub fn invalid_request(message: impl Into<Cow<'static, str>>, data: Option<Value>) -> Self {
        Self::new(ErrorCode::INVALID_REQUEST, message, data)
    }
    /// The method name is taken from the const-string type `M` (`M::VALUE`).
    pub fn method_not_found<M: ConstString>() -> Self {
        Self::new(ErrorCode::METHOD_NOT_FOUND, M::VALUE, None)
    }
    pub fn invalid_params(message: impl Into<Cow<'static, str>>, data: Option<Value>) -> Self {
        Self::new(ErrorCode::INVALID_PARAMS, message, data)
    }
    pub fn internal_error(message: impl Into<Cow<'static, str>>, data: Option<Value>) -> Self {
        Self::new(ErrorCode::INTERNAL_ERROR, message, data)
    }
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum JsonRpcBatchRequestItem<Req, Not> {
    Request(JsonRpcRequest<Req>),
    Notification(JsonRpcNotification<Not>),
}

impl<Req, Not> JsonRpcBatchRequestItem<Req, Not> {
    // NOTE(review): `Resp` is a method-level generic here since the item
    // itself never holds a response — restored after extraction stripping;
    // verify against upstream.
    pub fn into_non_batch_message<Resp>(self) -> JsonRpcMessage<Req, Resp, Not> {
        match self {
            JsonRpcBatchRequestItem::Request(r) => JsonRpcMessage::Request(r),
            JsonRpcBatchRequestItem::Notification(n) => JsonRpcMessage::Notification(n),
        }
    }
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum JsonRpcBatchResponseItem<Resp> {
    Response(JsonRpcResponse<Resp>),
    Error(JsonRpcError),
}

impl<Resp> JsonRpcBatchResponseItem<Resp> {
    pub fn into_non_batch_message<Req, Not>(self) -> JsonRpcMessage<Req, Resp, Not> {
        match self {
            JsonRpcBatchResponseItem::Response(r) => JsonRpcMessage::Response(r),
            JsonRpcBatchResponseItem::Error(e) => JsonRpcMessage::Error(e),
        }
    }
}

/// Represents any JSON-RPC message that can be sent or received.
///
/// This enum covers all possible message types in the JSON-RPC protocol:
/// individual requests/responses, notifications, batch operations, and errors.
/// It serves as the top-level message container for MCP communication.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum JsonRpcMessage<Req, Resp, Not> {
    /// A single request expecting a response
    Request(JsonRpcRequest<Req>),
    /// A response to a previous request
    Response(JsonRpcResponse<Resp>),
    /// A one-way notification (no response expected)
    Notification(JsonRpcNotification<Not>),
    /// Multiple requests sent together
    BatchRequest(Vec<JsonRpcBatchRequestItem<Req, Not>>),
    /// Multiple responses sent together
    BatchResponse(Vec<JsonRpcBatchResponseItem<Resp>>),
    /// An error response
    Error(JsonRpcError),
}

impl<Req, Resp, Not> JsonRpcMessage<Req, Resp, Not> {
    #[inline]
    pub const fn request(request: Req, id: RequestId) -> Self {
        JsonRpcMessage::Request(JsonRpcRequest {
            jsonrpc: JsonRpcVersion2_0,
            id,
            request,
        })
    }
    #[inline]
    pub const fn response(response: Resp, id: RequestId) -> Self {
        JsonRpcMessage::Response(JsonRpcResponse {
            jsonrpc: JsonRpcVersion2_0,
            id,
            result: response,
        })
    }
    #[inline]
    pub const fn error(error: ErrorData, id: RequestId) -> Self {
        JsonRpcMessage::Error(JsonRpcError {
            jsonrpc: JsonRpcVersion2_0,
            id,
            error,
        })
    }
    #[inline]
    pub const fn notification(notification: Not) -> Self {
        JsonRpcMessage::Notification(JsonRpcNotification {
            jsonrpc: JsonRpcVersion2_0,
            notification,
        })
    }
    pub fn into_request(self) -> Option<(Req, RequestId)> {
        match self {
            JsonRpcMessage::Request(r) => Some((r.request, r.id)),
            _ => None,
        }
    }
    pub fn into_response(self) -> Option<(Resp, RequestId)> {
        match self {
            JsonRpcMessage::Response(r) => Some((r.result, r.id)),
            _ => None,
        }
    }
    pub fn into_notification(self) -> Option<Not> {
        match self {
            JsonRpcMessage::Notification(n) => Some(n.notification),
            _ => None,
        }
    }
    pub fn into_error(self) -> Option<(ErrorData, RequestId)> {
        match self {
            JsonRpcMessage::Error(e) => Some((e.error, e.id)),
            _ => None,
        }
    }
    /// Collapse response/error into a `Result`, keeping the request id.
    pub fn into_result(self) -> Option<(Result<Resp, ErrorData>, RequestId)> {
        match self {
            JsonRpcMessage::Response(r) => Some((Ok(r.result), r.id)),
            JsonRpcMessage::Error(e) => Some((Err(e.error), e.id)),

            _ => None,
        }
    }
}

// =============================================================================
// INITIALIZATION AND CONNECTION SETUP
// =============================================================================

/// # Empty result
/// A response that indicates success but carries no data.
pub type EmptyResult = EmptyObject;

impl From<()> for EmptyResult {
    fn from(_value: ()) -> Self {
        EmptyResult {}
    }
}

impl From<EmptyResult> for () {
    fn from(_value: EmptyResult) {}
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct CancelledNotificationParam {
    pub request_id: RequestId,
    pub reason: Option<String>,
}

const_string!(CancelledNotificationMethod = "notifications/cancelled");

/// # Cancellation
/// This notification can be sent by either side to indicate that it is cancelling a previously-issued request.
///
/// The request SHOULD still be in-flight, but due to communication latency, it is always possible that this notification
MAY arrive after the request has already finished.\n///\n/// This notification indicates that the result will be unused, so any associated processing SHOULD cease.\n///\n/// A client MUST NOT attempt to cancel its `initialize` request.\npub type CancelledNotification =\n Notification;\n\nconst_string!(InitializeResultMethod = \"initialize\");\n/// # Initialization\n/// This request is sent from the client to the server when it first connects, asking it to begin initialization.\npub type InitializeRequest = Request;\n\nconst_string!(InitializedNotificationMethod = \"notifications/initialized\");\n/// This notification is sent from the client to the server after initialization has finished.\npub type InitializedNotification = NotificationNoParam;\n\n/// Parameters sent by a client when initializing a connection to an MCP server.\n///\n/// This contains the client's protocol version, capabilities, and implementation\n/// information, allowing the server to understand what the client supports.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct InitializeRequestParam {\n /// The MCP protocol version this client supports\n pub protocol_version: ProtocolVersion,\n /// The capabilities this client supports (sampling, roots, etc.)\n pub capabilities: ClientCapabilities,\n /// Information about the client implementation\n pub client_info: Implementation,\n}\n\n/// The server's response to an initialization request.\n///\n/// Contains the server's protocol version, capabilities, and implementation\n/// information, along with optional instructions for the client.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct InitializeResult {\n /// The MCP protocol version this server supports\n pub protocol_version: ProtocolVersion,\n /// 
The capabilities this server provides (tools, resources, prompts, etc.)\n pub capabilities: ServerCapabilities,\n /// Information about the server implementation\n pub server_info: Implementation,\n /// Optional human-readable instructions about using this server\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub instructions: Option,\n}\n\npub type ServerInfo = InitializeResult;\npub type ClientInfo = InitializeRequestParam;\n\nimpl Default for ServerInfo {\n fn default() -> Self {\n ServerInfo {\n protocol_version: ProtocolVersion::default(),\n capabilities: ServerCapabilities::default(),\n server_info: Implementation::from_build_env(),\n instructions: None,\n }\n }\n}\n\nimpl Default for ClientInfo {\n fn default() -> Self {\n ClientInfo {\n protocol_version: ProtocolVersion::default(),\n capabilities: ClientCapabilities::default(),\n client_info: Implementation::from_build_env(),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Implementation {\n pub name: String,\n pub version: String,\n}\n\nimpl Default for Implementation {\n fn default() -> Self {\n Self::from_build_env()\n }\n}\n\nimpl Implementation {\n pub fn from_build_env() -> Self {\n Implementation {\n name: env!(\"CARGO_CRATE_NAME\").to_owned(),\n version: env!(\"CARGO_PKG_VERSION\").to_owned(),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PaginatedRequestParam {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub cursor: Option,\n}\n// =============================================================================\n// PROGRESS AND PAGINATION\n// =============================================================================\n\nconst_string!(PingRequestMethod = \"ping\");\npub type PingRequest = 
RequestNoParam;\n\nconst_string!(ProgressNotificationMethod = \"notifications/progress\");\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ProgressNotificationParam {\n pub progress_token: ProgressToken,\n /// The progress thus far. This should increase every time progress is made, even if the total is unknown.\n pub progress: u32,\n /// Total number of items to process (or total progress required), if known\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub total: Option,\n /// An optional message describing the current progress.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub message: Option,\n}\n\npub type ProgressNotification = Notification;\n\npub type Cursor = String;\n\nmacro_rules! paginated_result {\n ($t:ident {\n $i_item: ident: $t_item: ty\n }) => {\n #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n #[serde(rename_all = \"camelCase\")]\n #[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\n pub struct $t {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub next_cursor: Option,\n pub $i_item: $t_item,\n }\n\n impl $t {\n pub fn with_all_items(\n items: $t_item,\n ) -> Self {\n Self {\n next_cursor: None,\n $i_item: items,\n }\n }\n }\n };\n}\n\n// =============================================================================\n// RESOURCE MANAGEMENT\n// =============================================================================\n\nconst_string!(ListResourcesRequestMethod = \"resources/list\");\n/// Request to list all available resources from a server\npub type ListResourcesRequest =\n RequestOptionalParam;\n\npaginated_result!(ListResourcesResult {\n resources: Vec\n});\n\nconst_string!(ListResourceTemplatesRequestMethod = \"resources/templates/list\");\n/// Request to list all available resource templates from a server\npub type ListResourceTemplatesRequest =\n 
RequestOptionalParam;\n\npaginated_result!(ListResourceTemplatesResult {\n resource_templates: Vec\n});\n\nconst_string!(ReadResourceRequestMethod = \"resources/read\");\n/// Parameters for reading a specific resource\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ReadResourceRequestParam {\n /// The URI of the resource to read\n pub uri: String,\n}\n\n/// Result containing the contents of a read resource\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ReadResourceResult {\n /// The actual content of the resource\n pub contents: Vec,\n}\n\n/// Request to read a specific resource\npub type ReadResourceRequest = Request;\n\nconst_string!(ResourceListChangedNotificationMethod = \"notifications/resources/list_changed\");\n/// Notification sent when the list of available resources changes\npub type ResourceListChangedNotification =\n NotificationNoParam;\n\nconst_string!(SubscribeRequestMethod = \"resources/subscribe\");\n/// Parameters for subscribing to resource updates\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct SubscribeRequestParam {\n /// The URI of the resource to subscribe to\n pub uri: String,\n}\n/// Request to subscribe to resource updates\npub type SubscribeRequest = Request;\n\nconst_string!(UnsubscribeRequestMethod = \"resources/unsubscribe\");\n/// Parameters for unsubscribing from resource updates\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct UnsubscribeRequestParam {\n /// The URI of the resource to unsubscribe from\n pub uri: String,\n}\n/// Request to unsubscribe from 
resource updates\npub type UnsubscribeRequest = Request;\n\nconst_string!(ResourceUpdatedNotificationMethod = \"notifications/resources/updated\");\n/// Parameters for a resource update notification\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ResourceUpdatedNotificationParam {\n /// The URI of the resource that was updated\n pub uri: String,\n}\n/// Notification sent when a subscribed resource is updated\npub type ResourceUpdatedNotification =\n Notification;\n\n// =============================================================================\n// PROMPT MANAGEMENT\n// =============================================================================\n\nconst_string!(ListPromptsRequestMethod = \"prompts/list\");\n/// Request to list all available prompts from a server\npub type ListPromptsRequest = RequestOptionalParam;\n\npaginated_result!(ListPromptsResult {\n prompts: Vec\n});\n\nconst_string!(GetPromptRequestMethod = \"prompts/get\");\n/// Parameters for retrieving a specific prompt\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct GetPromptRequestParam {\n pub name: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub arguments: Option,\n}\n/// Request to get a specific prompt\npub type GetPromptRequest = Request;\n\nconst_string!(PromptListChangedNotificationMethod = \"notifications/prompts/list_changed\");\n/// Notification sent when the list of available prompts changes\npub type PromptListChangedNotification = NotificationNoParam;\n\nconst_string!(ToolListChangedNotificationMethod = \"notifications/tools/list_changed\");\n/// Notification sent when the list of available tools changes\npub type ToolListChangedNotification = NotificationNoParam;\n\n// 
=============================================================================\n// LOGGING\n// =============================================================================\n\n/// Logging levels supported by the MCP protocol\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Copy)]\n#[serde(rename_all = \"lowercase\")] //match spec\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum LoggingLevel {\n Debug,\n Info,\n Notice,\n Warning,\n Error,\n Critical,\n Alert,\n Emergency,\n}\n\nconst_string!(SetLevelRequestMethod = \"logging/setLevel\");\n/// Parameters for setting the logging level\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct SetLevelRequestParam {\n /// The desired logging level\n pub level: LoggingLevel,\n}\n/// Request to set the logging level\npub type SetLevelRequest = Request;\n\nconst_string!(LoggingMessageNotificationMethod = \"notifications/message\");\n/// Parameters for a logging message notification\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct LoggingMessageNotificationParam {\n /// The severity level of this log message\n pub level: LoggingLevel,\n /// Optional logger name that generated this message\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub logger: Option,\n /// The actual log data\n pub data: Value,\n}\n/// Notification containing a log message\npub type LoggingMessageNotification =\n Notification;\n\n// =============================================================================\n// SAMPLING (LLM INTERACTION)\n// =============================================================================\n\nconst_string!(CreateMessageRequestMethod = \"sampling/createMessage\");\npub type CreateMessageRequest = Request;\n\n/// Represents the role of 
a participant in a conversation or message exchange.\n///\n/// Used in sampling and chat contexts to distinguish between different\n/// types of message senders in the conversation flow.\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum Role {\n /// A human user or client making a request\n User,\n /// An AI assistant or server providing a response\n Assistant,\n}\n\n/// A message in a sampling conversation, containing a role and content.\n///\n/// This represents a single message in a conversation flow, used primarily\n/// in LLM sampling requests where the conversation history is important\n/// for generating appropriate responses.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct SamplingMessage {\n /// The role of the message sender (User or Assistant)\n pub role: Role,\n /// The actual content of the message (text, image, etc.)\n pub content: Content,\n}\n\n/// Specifies how much context should be included in sampling requests.\n///\n/// This allows clients to control what additional context information\n/// should be provided to the LLM when processing sampling requests.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum ContextInclusion {\n /// Include context from all connected MCP servers\n #[serde(rename = \"allServers\")]\n AllServers,\n /// Include no additional context\n #[serde(rename = \"none\")]\n None,\n /// Include context only from the requesting server\n #[serde(rename = \"thisServer\")]\n ThisServer,\n}\n\n/// Parameters for creating a message through LLM sampling.\n///\n/// This structure contains all the necessary information for a client to\n/// generate an LLM response, including conversation history, model preferences,\n/// and generation 
parameters.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CreateMessageRequestParam {\n /// The conversation history and current messages\n pub messages: Vec,\n /// Preferences for model selection and behavior\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub model_preferences: Option,\n /// System prompt to guide the model's behavior\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub system_prompt: Option,\n /// How much context to include from MCP servers\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub include_context: Option,\n /// Temperature for controlling randomness (0.0 to 1.0)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub temperature: Option,\n /// Maximum number of tokens to generate\n pub max_tokens: u32,\n /// Sequences that should stop generation\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub stop_sequences: Option>,\n /// Additional metadata for the request\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub metadata: Option,\n}\n\n/// Preferences for model selection and behavior in sampling requests.\n///\n/// This allows servers to express their preferences for which model to use\n/// and how to balance different priorities when the client has multiple\n/// model options available.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ModelPreferences {\n /// Specific model names or families to prefer (e.g., \"claude\", \"gpt\")\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub hints: Option>,\n /// Priority for cost optimization (0.0 to 1.0, higher = prefer cheaper models)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub cost_priority: Option,\n /// Priority for speed/latency (0.0 to 1.0, higher = prefer faster 
models)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub speed_priority: Option<f32>,
    /// Priority for intelligence/capability (0.0 to 1.0, higher = prefer more capable models)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub intelligence_priority: Option<f32>,
}

/// A hint suggesting a preferred model name or family.
///
/// Model hints are advisory suggestions that help clients choose appropriate
/// models. They can be specific model names or general families like "claude" or "gpt".
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ModelHint {
    /// The suggested model name or family identifier
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

// =============================================================================
// COMPLETION AND AUTOCOMPLETE
// =============================================================================

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct CompleteRequestParam {
    pub r#ref: Reference,
    pub argument: ArgumentInfo,
}

pub type CompleteRequest = Request<CompleteRequestMethod, CompleteRequestParam>;

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct CompletionInfo {
    pub values: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub total: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub has_more: Option<bool>,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct CompleteResult {
    pub completion: CompletionInfo,
}

/// What a completion request refers to: a resource URI or a prompt name.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub enum Reference {
    #[serde(rename = "ref/resource")]
    Resource(ResourceReference),
    #[serde(rename = "ref/prompt")]
    Prompt(PromptReference),
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ResourceReference {
    pub uri: String,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct PromptReference {
    pub name: String,
}

const_string!(CompleteRequestMethod = "completion/complete");
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ArgumentInfo {
    pub name: String,
    pub value: String,
}

// =============================================================================
// ROOTS AND WORKSPACE MANAGEMENT
// =============================================================================

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Root {
    pub uri: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

const_string!(ListRootsRequestMethod = "roots/list");
pub type ListRootsRequest = RequestNoParam<ListRootsRequestMethod>;

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ListRootsResult {
    pub roots: Vec<Root>,
}

const_string!(RootsListChangedNotificationMethod = "notifications/roots/list_changed");
pub type RootsListChangedNotification = NotificationNoParam<RootsListChangedNotificationMethod>;

// =============================================================================
// TOOL EXECUTION RESULTS
//
=============================================================================\n\n/// The result of a tool call operation.\n///\n/// Contains the content returned by the tool execution and an optional\n/// flag indicating whether the operation resulted in an error.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CallToolResult {\n /// The content returned by the tool (text, images, etc.)\n pub content: Vec,\n /// Whether this result represents an error condition\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub is_error: Option,\n}\n\nimpl CallToolResult {\n /// Create a successful tool result\n pub fn success(content: Vec) -> Self {\n CallToolResult {\n content,\n is_error: Some(false),\n }\n }\n /// Create an error tool result\n pub fn error(content: Vec) -> Self {\n CallToolResult {\n content,\n is_error: Some(true),\n }\n }\n}\n\nconst_string!(ListToolsRequestMethod = \"tools/list\");\n/// Request to list all available tools from a server\npub type ListToolsRequest = RequestOptionalParam;\n\npaginated_result!(\n ListToolsResult {\n tools: Vec\n }\n);\n\nconst_string!(CallToolRequestMethod = \"tools/call\");\n/// Parameters for calling a tool provided by an MCP server.\n///\n/// Contains the tool name and optional arguments needed to execute\n/// the tool operation.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CallToolRequestParam {\n /// The name of the tool to call\n pub name: Cow<'static, str>,\n /// Arguments to pass to the tool (must match the tool's input schema)\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub arguments: Option,\n}\n\n/// Request to call a specific tool\npub type CallToolRequest = Request;\n\n/// The result of a sampling/createMessage request containing the 
generated response.\n///\n/// This structure contains the generated message along with metadata about\n/// how the generation was performed and why it stopped.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct CreateMessageResult {\n /// The identifier of the model that generated the response\n pub model: String,\n /// The reason why generation stopped (e.g., \"endTurn\", \"maxTokens\")\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub stop_reason: Option,\n /// The generated message with role and content\n #[serde(flatten)]\n pub message: SamplingMessage,\n}\n\nimpl CreateMessageResult {\n pub const STOP_REASON_END_TURN: &str = \"endTurn\";\n pub const STOP_REASON_END_SEQUENCE: &str = \"stopSequence\";\n pub const STOP_REASON_END_MAX_TOKEN: &str = \"maxTokens\";\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct GetPromptResult {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n pub messages: Vec,\n}\n\n// =============================================================================\n// MESSAGE TYPE UNIONS\n// =============================================================================\n\nmacro_rules! 
ts_union {\n (\n export type $U: ident =\n $(|)?$($V: ident)|*;\n ) => {\n #[derive(Debug, Serialize, Deserialize, Clone)]\n #[serde(untagged)]\n #[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\n pub enum $U {\n $($V($V),)*\n }\n };\n}\n\nts_union!(\n export type ClientRequest =\n | PingRequest\n | InitializeRequest\n | CompleteRequest\n | SetLevelRequest\n | GetPromptRequest\n | ListPromptsRequest\n | ListResourcesRequest\n | ListResourceTemplatesRequest\n | ReadResourceRequest\n | SubscribeRequest\n | UnsubscribeRequest\n | CallToolRequest\n | ListToolsRequest;\n);\n\nts_union!(\n export type ClientNotification =\n | CancelledNotification\n | ProgressNotification\n | InitializedNotification\n | RootsListChangedNotification;\n);\n\nts_union!(\n export type ClientResult = CreateMessageResult | ListRootsResult | EmptyResult;\n);\n\nimpl ClientResult {\n pub fn empty(_: ()) -> ClientResult {\n ClientResult::EmptyResult(EmptyResult {})\n }\n}\n\npub type ClientJsonRpcMessage = JsonRpcMessage;\n\nts_union!(\n export type ServerRequest =\n | PingRequest\n | CreateMessageRequest\n | ListRootsRequest;\n);\n\nts_union!(\n export type ServerNotification =\n | CancelledNotification\n | ProgressNotification\n | LoggingMessageNotification\n | ResourceUpdatedNotification\n | ResourceListChangedNotification\n | ToolListChangedNotification\n | PromptListChangedNotification;\n);\n\nts_union!(\n export type ServerResult =\n | InitializeResult\n | CompleteResult\n | GetPromptResult\n | ListPromptsResult\n | ListResourcesResult\n | ListResourceTemplatesResult\n | ReadResourceResult\n | CallToolResult\n | ListToolsResult\n | EmptyResult\n ;\n);\n\nimpl ServerResult {\n pub fn empty(_: ()) -> ServerResult {\n ServerResult::EmptyResult(EmptyResult {})\n }\n}\n\npub type ServerJsonRpcMessage = JsonRpcMessage;\n\nimpl TryInto for ServerNotification {\n type Error = ServerNotification;\n fn try_into(self) -> Result {\n if let ServerNotification::CancelledNotification(t) = 
self {\n Ok(t)\n } else {\n Err(self)\n }\n }\n}\n\nimpl TryInto for ClientNotification {\n type Error = ClientNotification;\n fn try_into(self) -> Result {\n if let ClientNotification::CancelledNotification(t) = self {\n Ok(t)\n } else {\n Err(self)\n }\n }\n}\nimpl From for ServerNotification {\n fn from(value: CancelledNotification) -> Self {\n ServerNotification::CancelledNotification(value)\n }\n}\n\nimpl From for ClientNotification {\n fn from(value: CancelledNotification) -> Self {\n ClientNotification::CancelledNotification(value)\n }\n}\n\n// =============================================================================\n// TESTS\n// =============================================================================\n\n#[cfg(test)]\nmod tests {\n use serde_json::json;\n\n use super::*;\n\n #[test]\n fn test_notification_serde() {\n let raw = json!( {\n \"jsonrpc\": JsonRpcVersion2_0,\n \"method\": InitializedNotificationMethod,\n });\n let message: ClientJsonRpcMessage =\n serde_json::from_value(raw.clone()).expect(\"invalid notification\");\n match &message {\n ClientJsonRpcMessage::Notification(JsonRpcNotification {\n notification: ClientNotification::InitializedNotification(_n),\n ..\n }) => {}\n _ => panic!(\"Expected Notification\"),\n }\n let json = serde_json::to_value(message).expect(\"valid json\");\n assert_eq!(json, raw);\n }\n\n #[test]\n fn test_request_conversion() {\n let raw = json!( {\n \"jsonrpc\": JsonRpcVersion2_0,\n \"id\": 1,\n \"method\": \"request\",\n \"params\": {\"key\": \"value\"},\n });\n let message: JsonRpcMessage = serde_json::from_value(raw.clone()).expect(\"invalid request\");\n\n match &message {\n JsonRpcMessage::Request(r) => {\n assert_eq!(r.id, RequestId::Number(1));\n assert_eq!(r.request.method, \"request\");\n assert_eq!(\n &r.request.params,\n json!({\"key\": \"value\"})\n .as_object()\n .expect(\"should be an object\")\n );\n }\n _ => panic!(\"Expected Request\"),\n }\n let json = 
serde_json::to_value(&message).expect(\"valid json\");\n assert_eq!(json, raw);\n }\n\n #[test]\n fn test_initial_request_response_serde() {\n let request = json!({\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"method\": \"initialize\",\n \"params\": {\n \"protocolVersion\": \"2024-11-05\",\n \"capabilities\": {\n \"roots\": {\n \"listChanged\": true\n },\n \"sampling\": {}\n },\n \"clientInfo\": {\n \"name\": \"ExampleClient\",\n \"version\": \"1.0.0\"\n }\n }\n });\n let raw_response_json = json!({\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"result\": {\n \"protocolVersion\": \"2024-11-05\",\n \"capabilities\": {\n \"logging\": {},\n \"prompts\": {\n \"listChanged\": true\n },\n \"resources\": {\n \"subscribe\": true,\n \"listChanged\": true\n },\n \"tools\": {\n \"listChanged\": true\n }\n },\n \"serverInfo\": {\n \"name\": \"ExampleServer\",\n \"version\": \"1.0.0\"\n }\n }\n });\n let request: ClientJsonRpcMessage =\n serde_json::from_value(request.clone()).expect(\"invalid request\");\n let (request, id) = request.into_request().expect(\"should be a request\");\n assert_eq!(id, RequestId::Number(1));\n match request {\n ClientRequest::InitializeRequest(Request {\n method: _,\n params:\n InitializeRequestParam {\n protocol_version: _,\n capabilities,\n client_info,\n },\n ..\n }) => {\n assert_eq!(capabilities.roots.unwrap().list_changed, Some(true));\n assert_eq!(capabilities.sampling.unwrap().len(), 0);\n assert_eq!(client_info.name, \"ExampleClient\");\n assert_eq!(client_info.version, \"1.0.0\");\n }\n _ => panic!(\"Expected InitializeRequest\"),\n }\n let server_response: ServerJsonRpcMessage =\n serde_json::from_value(raw_response_json.clone()).expect(\"invalid response\");\n let (response, id) = server_response\n .clone()\n .into_response()\n .expect(\"expect response\");\n assert_eq!(id, RequestId::Number(1));\n match response {\n ServerResult::InitializeResult(InitializeResult {\n protocol_version: _,\n capabilities,\n server_info,\n instructions,\n }) => {\n 
assert_eq!(capabilities.logging.unwrap().len(), 0);\n assert_eq!(capabilities.prompts.unwrap().list_changed, Some(true));\n assert_eq!(\n capabilities.resources.as_ref().unwrap().subscribe,\n Some(true)\n );\n assert_eq!(capabilities.resources.unwrap().list_changed, Some(true));\n assert_eq!(capabilities.tools.unwrap().list_changed, Some(true));\n assert_eq!(server_info.name, \"ExampleServer\");\n assert_eq!(server_info.version, \"1.0.0\");\n assert_eq!(instructions, None);\n }\n other => panic!(\"Expected InitializeResult, got {other:?}\"),\n }\n\n let server_response_json: Value = serde_json::to_value(&server_response).expect(\"msg\");\n\n assert_eq!(server_response_json, raw_response_json);\n }\n\n #[test]\n fn test_protocol_version_order() {\n let v1 = ProtocolVersion::V_2024_11_05;\n let v2 = ProtocolVersion::V_2025_03_26;\n assert!(v1 < v2);\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/router.rs", "use std::sync::Arc;\n\nuse tool::{IntoToolRoute, ToolRoute};\n\nuse super::ServerHandler;\nuse crate::{\n RoleServer, Service,\n model::{ClientRequest, ListToolsResult, ServerResult},\n service::NotificationContext,\n};\n\npub mod tool;\n\npub struct Router {\n pub tool_router: tool::ToolRouter,\n pub service: Arc,\n}\n\nimpl Router\nwhere\n S: ServerHandler,\n{\n pub fn new(service: S) -> Self {\n Self {\n tool_router: tool::ToolRouter::new(),\n service: Arc::new(service),\n }\n }\n\n pub fn with_tool(mut self, route: R) -> Self\n where\n R: IntoToolRoute,\n {\n self.tool_router.add_route(route.into_tool_route());\n self\n }\n\n pub fn with_tools(mut self, routes: impl IntoIterator>) -> Self {\n for route in routes {\n self.tool_router.add_route(route);\n }\n self\n }\n}\n\nimpl Service for Router\nwhere\n S: ServerHandler,\n{\n async fn handle_notification(\n &self,\n notification: ::PeerNot,\n context: NotificationContext,\n ) -> Result<(), crate::ErrorData> {\n self.service\n .handle_notification(notification, context)\n .await\n }\n async fn 
handle_request(\n &self,\n request: ::PeerReq,\n context: crate::service::RequestContext,\n ) -> Result<::Resp, crate::ErrorData> {\n match request {\n ClientRequest::CallToolRequest(request) => {\n if self.tool_router.has_route(request.params.name.as_ref())\n || !self.tool_router.transparent_when_not_found\n {\n let tool_call_context = crate::handler::server::tool::ToolCallContext::new(\n self.service.as_ref(),\n request.params,\n context,\n );\n let result = self.tool_router.call(tool_call_context).await?;\n Ok(ServerResult::CallToolResult(result))\n } else {\n self.service\n .handle_request(ClientRequest::CallToolRequest(request), context)\n .await\n }\n }\n ClientRequest::ListToolsRequest(_) => {\n let tools = self.tool_router.list_all();\n Ok(ServerResult::ListToolsResult(ListToolsResult {\n tools,\n next_cursor: None,\n }))\n }\n rest => self.service.handle_request(rest, context).await,\n }\n }\n\n fn get_info(&self) -> ::Info {\n self.service.get_info()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/capabilities.rs", "use std::{collections::BTreeMap, marker::PhantomData};\n\nuse paste::paste;\nuse serde::{Deserialize, Serialize};\n\nuse super::JsonObject;\npub type ExperimentalCapabilities = BTreeMap;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptsCapability {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub list_changed: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ResourcesCapability {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub subscribe: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub list_changed: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = 
\"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ToolsCapability {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub list_changed: Option,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RootsCapabilities {\n pub list_changed: Option,\n}\n\n///\n/// # Builder\n/// ```rust\n/// # use rmcp::model::ClientCapabilities;\n/// let cap = ClientCapabilities::builder()\n/// .enable_experimental()\n/// .enable_roots()\n/// .enable_roots_list_changed()\n/// .build();\n/// ```\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ClientCapabilities {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub experimental: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub roots: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub sampling: Option,\n}\n\n///\n/// ## Builder\n/// ```rust\n/// # use rmcp::model::ServerCapabilities;\n/// let cap = ServerCapabilities::builder()\n/// .enable_logging()\n/// .enable_experimental()\n/// .enable_prompts()\n/// .enable_resources()\n/// .enable_tools()\n/// .enable_tool_list_changed()\n/// .build();\n/// ```\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ServerCapabilities {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub experimental: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub logging: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub completions: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub prompts: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub resources: Option,\n 
#[serde(skip_serializing_if = \"Option::is_none\")]\n pub tools: Option,\n}\n\nmacro_rules! builder {\n ($Target: ident {$($f: ident: $T: ty),* $(,)?}) => {\n paste! {\n #[derive(Default, Clone, Copy, Debug)]\n pub struct [<$Target BuilderState>]<\n $(const [<$f:upper>]: bool = false,)*\n >;\n #[derive(Debug, Default)]\n pub struct [<$Target Builder>]]> {\n $(pub $f: Option<$T>,)*\n pub state: PhantomData\n }\n impl $Target {\n #[doc = \"Create a new [`\" $Target \"`] builder.\"]\n pub fn builder() -> [<$Target Builder>] {\n <[<$Target Builder>]>::default()\n }\n }\n impl [<$Target Builder>] {\n pub fn build(self) -> $Target {\n $Target {\n $( $f: self.$f, )*\n }\n }\n }\n impl From<[<$Target Builder>]> for $Target {\n fn from(builder: [<$Target Builder>]) -> Self {\n builder.build()\n }\n }\n }\n builder!($Target @toggle $($f: $T,) *);\n\n };\n ($Target: ident @toggle $f0: ident: $T0: ty, $($f: ident: $T: ty,)*) => {\n builder!($Target @toggle [][$f0: $T0][$($f: $T,)*]);\n };\n ($Target: ident @toggle [$($ff: ident: $Tf: ty,)*][$fn: ident: $TN: ty][$fn_1: ident: $Tn_1: ty, $($ft: ident: $Tt: ty,)*]) => {\n builder!($Target @impl_toggle [$($ff: $Tf,)*][$fn: $TN][$fn_1: $Tn_1, $($ft:$Tt,)*]);\n builder!($Target @toggle [$($ff: $Tf,)* $fn: $TN,][$fn_1: $Tn_1][$($ft:$Tt,)*]);\n };\n ($Target: ident @toggle [$($ff: ident: $Tf: ty,)*][$fn: ident: $TN: ty][]) => {\n builder!($Target @impl_toggle [$($ff: $Tf,)*][$fn: $TN][]);\n };\n ($Target: ident @impl_toggle [$($ff: ident: $Tf: ty,)*][$fn: ident: $TN: ty][$($ft: ident: $Tt: ty,)*]) => {\n paste! 
{\n impl<\n $(const [<$ff:upper>]: bool,)*\n $(const [<$ft:upper>]: bool,)*\n > [<$Target Builder>]<[<$Target BuilderState>]<\n $([<$ff:upper>],)*\n false,\n $([<$ft:upper>],)*\n >> {\n pub fn [](self) -> [<$Target Builder>]<[<$Target BuilderState>]<\n $([<$ff:upper>],)*\n true,\n $([<$ft:upper>],)*\n >> {\n [<$Target Builder>] {\n $( $ff: self.$ff, )*\n $fn: Some($TN::default()),\n $( $ft: self.$ft, )*\n state: PhantomData\n }\n }\n pub fn [](self, $fn: $TN) -> [<$Target Builder>]<[<$Target BuilderState>]<\n $([<$ff:upper>],)*\n true,\n $([<$ft:upper>],)*\n >> {\n [<$Target Builder>] {\n $( $ff: self.$ff, )*\n $fn: Some($fn),\n $( $ft: self.$ft, )*\n state: PhantomData\n }\n }\n }\n // do we really need to disable some thing in builder?\n // impl<\n // $(const [<$ff:upper>]: bool,)*\n // $(const [<$ft:upper>]: bool,)*\n // > [<$Target Builder>]<[<$Target BuilderState>]<\n // $([<$ff:upper>],)*\n // true,\n // $([<$ft:upper>],)*\n // >> {\n // pub fn [](self) -> [<$Target Builder>]<[<$Target BuilderState>]<\n // $([<$ff:upper>],)*\n // false,\n // $([<$ft:upper>],)*\n // >> {\n // [<$Target Builder>] {\n // $( $ff: self.$ff, )*\n // $fn: None,\n // $( $ft: self.$ft, )*\n // state: PhantomData\n // }\n // }\n // }\n }\n }\n}\n\nbuilder! 
{\n ServerCapabilities {\n experimental: ExperimentalCapabilities,\n logging: JsonObject,\n completions: JsonObject,\n prompts: PromptsCapability,\n resources: ResourcesCapability,\n tools: ToolsCapability\n }\n}\n\nimpl\n ServerCapabilitiesBuilder>\n{\n pub fn enable_tool_list_changed(mut self) -> Self {\n if let Some(c) = self.tools.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n}\n\nimpl\n ServerCapabilitiesBuilder>\n{\n pub fn enable_prompts_list_changed(mut self) -> Self {\n if let Some(c) = self.prompts.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n}\n\nimpl\n ServerCapabilitiesBuilder>\n{\n pub fn enable_resources_list_changed(mut self) -> Self {\n if let Some(c) = self.resources.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n\n pub fn enable_resources_subscribe(mut self) -> Self {\n if let Some(c) = self.resources.as_mut() {\n c.subscribe = Some(true);\n }\n self\n }\n}\n\nbuilder! {\n ClientCapabilities{\n experimental: ExperimentalCapabilities,\n roots: RootsCapabilities,\n sampling: JsonObject,\n }\n}\n\nimpl\n ClientCapabilitiesBuilder>\n{\n pub fn enable_roots_list_changed(mut self) -> Self {\n if let Some(c) = self.roots.as_mut() {\n c.list_changed = Some(true);\n }\n self\n }\n}\n\n#[cfg(test)]\nmod test {\n use super::*;\n #[test]\n fn test_builder() {\n let builder = ::default()\n .enable_logging()\n .enable_experimental()\n .enable_prompts()\n .enable_resources()\n .enable_tools()\n .enable_tool_list_changed();\n assert_eq!(builder.logging, Some(JsonObject::default()));\n assert_eq!(builder.prompts, Some(PromptsCapability::default()));\n assert_eq!(builder.resources, Some(ResourcesCapability::default()));\n assert_eq!(\n builder.tools,\n Some(ToolsCapability {\n list_changed: Some(true),\n })\n );\n assert_eq!(\n builder.experimental,\n Some(ExperimentalCapabilities::default())\n );\n let client_builder = ::default()\n .enable_experimental()\n .enable_roots()\n .enable_roots_list_changed()\n .enable_sampling();\n 
assert_eq!(\n client_builder.experimental,\n Some(ExperimentalCapabilities::default())\n );\n assert_eq!(\n client_builder.roots,\n Some(RootsCapabilities {\n list_changed: Some(true),\n })\n );\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/content.rs", "//! Content sent around agents, extensions, and LLMs\n//! The various content types can be display to humans but also understood by models\n//! They include optional annotations used to help inform agent usage\nuse serde::{Deserialize, Serialize};\nuse serde_json::json;\n\nuse super::{AnnotateAble, Annotated, resource::ResourceContents};\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawTextContent {\n pub text: String,\n}\npub type TextContent = Annotated;\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawImageContent {\n /// The base64-encoded image\n pub data: String,\n pub mime_type: String,\n}\n\npub type ImageContent = Annotated;\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawEmbeddedResource {\n pub resource: ResourceContents,\n}\npub type EmbeddedResource = Annotated;\n\nimpl EmbeddedResource {\n pub fn get_text(&self) -> String {\n match &self.resource {\n ResourceContents::TextResourceContents { text, .. 
} => text.clone(),\n _ => String::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawAudioContent {\n pub data: String,\n pub mime_type: String,\n}\n\npub type AudioContent = Annotated;\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(tag = \"type\", rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum RawContent {\n Text(RawTextContent),\n Image(RawImageContent),\n Resource(RawEmbeddedResource),\n Audio(AudioContent),\n}\n\npub type Content = Annotated;\n\nimpl RawContent {\n pub fn json(json: S) -> Result {\n let json = serde_json::to_string(&json).map_err(|e| {\n crate::ErrorData::internal_error(\n \"fail to serialize response to json\",\n Some(json!(\n {\"reason\": e.to_string()}\n )),\n )\n })?;\n Ok(RawContent::text(json))\n }\n\n pub fn text>(text: S) -> Self {\n RawContent::Text(RawTextContent { text: text.into() })\n }\n\n pub fn image, T: Into>(data: S, mime_type: T) -> Self {\n RawContent::Image(RawImageContent {\n data: data.into(),\n mime_type: mime_type.into(),\n })\n }\n\n pub fn resource(resource: ResourceContents) -> Self {\n RawContent::Resource(RawEmbeddedResource { resource })\n }\n\n pub fn embedded_text, T: Into>(uri: S, content: T) -> Self {\n RawContent::Resource(RawEmbeddedResource {\n resource: ResourceContents::TextResourceContents {\n uri: uri.into(),\n mime_type: Some(\"text\".to_string()),\n text: content.into(),\n },\n })\n }\n\n /// Get the text content if this is a TextContent variant\n pub fn as_text(&self) -> Option<&RawTextContent> {\n match self {\n RawContent::Text(text) => Some(text),\n _ => None,\n }\n }\n\n /// Get the image content if this is an ImageContent variant\n pub fn as_image(&self) -> Option<&RawImageContent> {\n match self {\n RawContent::Image(image) => Some(image),\n _ => None,\n }\n }\n\n 
/// Get the resource content if this is an ImageContent variant\n pub fn as_resource(&self) -> Option<&RawEmbeddedResource> {\n match self {\n RawContent::Resource(resource) => Some(resource),\n _ => None,\n }\n }\n}\n\nimpl Content {\n pub fn text>(text: S) -> Self {\n RawContent::text(text).no_annotation()\n }\n\n pub fn image, T: Into>(data: S, mime_type: T) -> Self {\n RawContent::image(data, mime_type).no_annotation()\n }\n\n pub fn resource(resource: ResourceContents) -> Self {\n RawContent::resource(resource).no_annotation()\n }\n\n pub fn embedded_text, T: Into>(uri: S, content: T) -> Self {\n RawContent::embedded_text(uri, content).no_annotation()\n }\n\n pub fn json(json: S) -> Result {\n RawContent::json(json).map(|c| c.no_annotation())\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\npub struct JsonContent(S);\n/// Types that can be converted into a list of contents\npub trait IntoContents {\n fn into_contents(self) -> Vec;\n}\n\nimpl IntoContents for Content {\n fn into_contents(self) -> Vec {\n vec![self]\n }\n}\n\nimpl IntoContents for String {\n fn into_contents(self) -> Vec {\n vec![Content::text(self)]\n }\n}\n\nimpl IntoContents for () {\n fn into_contents(self) -> Vec {\n vec![]\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/router/tool.rs", "use std::{borrow::Cow, sync::Arc};\n\nuse futures::{FutureExt, future::BoxFuture};\nuse schemars::JsonSchema;\n\nuse crate::{\n handler::server::tool::{\n CallToolHandler, DynCallToolHandler, ToolCallContext, schema_for_type,\n },\n model::{CallToolResult, Tool, ToolAnnotations},\n};\n\npub struct ToolRoute {\n #[allow(clippy::type_complexity)]\n pub call: Arc>,\n pub attr: crate::model::Tool,\n}\n\nimpl std::fmt::Debug for ToolRoute {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n f.debug_struct(\"ToolRoute\")\n .field(\"name\", &self.attr.name)\n .field(\"description\", &self.attr.description)\n .field(\"input_schema\", &self.attr.input_schema)\n 
.finish()\n }\n}\n\nimpl Clone for ToolRoute {\n fn clone(&self) -> Self {\n Self {\n call: self.call.clone(),\n attr: self.attr.clone(),\n }\n }\n}\n\nimpl ToolRoute {\n pub fn new(attr: impl Into, call: C) -> Self\n where\n C: CallToolHandler + Send + Sync + Clone + 'static,\n {\n Self {\n call: Arc::new(move |context: ToolCallContext| {\n let call = call.clone();\n context.invoke(call).boxed()\n }),\n attr: attr.into(),\n }\n }\n pub fn new_dyn(attr: impl Into, call: C) -> Self\n where\n C: for<'a> Fn(\n ToolCallContext<'a, S>,\n ) -> BoxFuture<'a, Result>\n + Send\n + Sync\n + 'static,\n {\n Self {\n call: Arc::new(call),\n attr: attr.into(),\n }\n }\n pub fn name(&self) -> &str {\n &self.attr.name\n }\n}\n\npub trait IntoToolRoute {\n fn into_tool_route(self) -> ToolRoute;\n}\n\nimpl IntoToolRoute for (T, C)\nwhere\n S: Send + Sync + 'static,\n C: CallToolHandler + Send + Sync + Clone + 'static,\n T: Into,\n{\n fn into_tool_route(self) -> ToolRoute {\n ToolRoute::new(self.0.into(), self.1)\n }\n}\n\nimpl IntoToolRoute for ToolRoute\nwhere\n S: Send + Sync + 'static,\n{\n fn into_tool_route(self) -> ToolRoute {\n self\n }\n}\n\npub struct ToolAttrGenerateFunctionAdapter;\nimpl IntoToolRoute for F\nwhere\n S: Send + Sync + 'static,\n F: Fn() -> ToolRoute,\n{\n fn into_tool_route(self) -> ToolRoute {\n (self)()\n }\n}\n\npub trait CallToolHandlerExt: Sized\nwhere\n Self: CallToolHandler + Send + Sync + Clone + 'static,\n{\n fn name(self, name: impl Into>) -> WithToolAttr;\n}\n\nimpl CallToolHandlerExt for C\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n{\n fn name(self, name: impl Into>) -> WithToolAttr {\n WithToolAttr {\n attr: Tool::new(\n name.into(),\n \"\",\n schema_for_type::(),\n ),\n call: self,\n _marker: std::marker::PhantomData,\n }\n }\n}\n\npub struct WithToolAttr\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n{\n pub attr: crate::model::Tool,\n pub call: C,\n pub _marker: std::marker::PhantomData,\n}\n\nimpl 
IntoToolRoute for WithToolAttr\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n S: Send + Sync + 'static,\n{\n fn into_tool_route(self) -> ToolRoute {\n ToolRoute::new(self.attr, self.call)\n }\n}\n\nimpl WithToolAttr\nwhere\n C: CallToolHandler + Send + Sync + Clone + 'static,\n{\n pub fn description(mut self, description: impl Into>) -> Self {\n self.attr.description = Some(description.into());\n self\n }\n pub fn parameters(mut self) -> Self {\n self.attr.input_schema = schema_for_type::().into();\n self\n }\n pub fn parameters_value(mut self, schema: serde_json::Value) -> Self {\n self.attr.input_schema = crate::model::object(schema).into();\n self\n }\n pub fn annotation(mut self, annotation: impl Into) -> Self {\n self.attr.annotations = Some(annotation.into());\n self\n }\n}\n#[derive(Debug)]\npub struct ToolRouter {\n #[allow(clippy::type_complexity)]\n pub map: std::collections::HashMap, ToolRoute>,\n\n pub transparent_when_not_found: bool,\n}\n\nimpl Default for ToolRouter {\n fn default() -> Self {\n Self {\n map: std::collections::HashMap::new(),\n transparent_when_not_found: false,\n }\n }\n}\nimpl Clone for ToolRouter {\n fn clone(&self) -> Self {\n Self {\n map: self.map.clone(),\n transparent_when_not_found: self.transparent_when_not_found,\n }\n }\n}\n\nimpl IntoIterator for ToolRouter {\n type Item = ToolRoute;\n type IntoIter = std::collections::hash_map::IntoValues, ToolRoute>;\n\n fn into_iter(self) -> Self::IntoIter {\n self.map.into_values()\n }\n}\n\nimpl ToolRouter\nwhere\n S: Send + Sync + 'static,\n{\n pub fn new() -> Self {\n Self {\n map: std::collections::HashMap::new(),\n transparent_when_not_found: false,\n }\n }\n pub fn with_route(mut self, route: R) -> Self\n where\n R: IntoToolRoute,\n {\n self.add_route(route.into_tool_route());\n self\n }\n\n pub fn add_route(&mut self, item: ToolRoute) {\n self.map.insert(item.attr.name.clone(), item);\n }\n\n pub fn merge(&mut self, other: ToolRouter) {\n for item in 
other.map.into_values() {\n self.add_route(item);\n }\n }\n\n pub fn remove_route(&mut self, name: &str) {\n self.map.remove(name);\n }\n pub fn has_route(&self, name: &str) -> bool {\n self.map.contains_key(name)\n }\n pub async fn call(\n &self,\n context: ToolCallContext<'_, S>,\n ) -> Result {\n let item = self\n .map\n .get(context.name())\n .ok_or_else(|| crate::ErrorData::invalid_params(\"tool not found\", None))?;\n (item.call)(context).await\n }\n\n pub fn list_all(&self) -> Vec {\n self.map.values().map(|item| item.attr.clone()).collect()\n }\n}\n\nimpl std::ops::Add> for ToolRouter\nwhere\n S: Send + Sync + 'static,\n{\n type Output = Self;\n\n fn add(mut self, other: ToolRouter) -> Self::Output {\n self.merge(other);\n self\n }\n}\n\nimpl std::ops::AddAssign> for ToolRouter\nwhere\n S: Send + Sync + 'static,\n{\n fn add_assign(&mut self, other: ToolRouter) {\n self.merge(other);\n }\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/tool.rs", "use darling::{FromMeta, ast::NestedMeta};\nuse proc_macro2::TokenStream;\nuse quote::{ToTokens, format_ident, quote};\nuse syn::{Expr, Ident, ImplItemFn, ReturnType};\n#[derive(FromMeta, Default, Debug)]\n#[darling(default)]\npub struct ToolAttribute {\n /// The name of the tool\n pub name: Option,\n pub description: Option,\n /// A JSON Schema object defining the expected parameters for the tool\n pub input_schema: Option,\n /// Optional additional tool information.\n pub annotations: Option,\n}\n\npub struct ResolvedToolAttribute {\n pub name: String,\n pub description: Option,\n pub input_schema: Expr,\n pub annotations: Expr,\n}\n\nimpl ResolvedToolAttribute {\n pub fn into_fn(self, fn_ident: Ident) -> syn::Result {\n let Self {\n name,\n description,\n input_schema,\n annotations,\n } = self;\n let description = if let Some(description) = description {\n quote! { Some(#description.into()) }\n } else {\n quote! { None }\n };\n let tokens = quote! 
{\n pub fn #fn_ident() -> rmcp::model::Tool {\n rmcp::model::Tool {\n name: #name.into(),\n description: #description,\n input_schema: #input_schema,\n annotations: #annotations,\n }\n }\n };\n syn::parse2::(tokens)\n }\n}\n\n#[derive(FromMeta, Debug, Default)]\n#[darling(default)]\npub struct ToolAnnotationsAttribute {\n /// A human-readable title for the tool.\n pub title: Option,\n\n /// If true, the tool does not modify its environment.\n ///\n /// Default: false\n pub read_only_hint: Option,\n\n /// If true, the tool may perform destructive updates to its environment.\n /// If false, the tool performs only additive updates.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: true\n /// A human-readable description of the tool's purpose.\n pub destructive_hint: Option,\n\n /// If true, calling the tool repeatedly with the same arguments\n /// will have no additional effect on the its environment.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: false.\n pub idempotent_hint: Option,\n\n /// If true, this tool may interact with an \"open world\" of external\n /// entities. If false, the tool's domain of interaction is closed.\n /// For example, the world of a web search tool is open, whereas that\n /// of a memory tool is not.\n ///\n /// Default: true\n pub open_world_hint: Option,\n}\n\nfn none_expr() -> Expr {\n syn::parse2::(quote! 
{ None }).unwrap()\n}\n\n// extract doc line from attribute\nfn extract_doc_line(existing_docs: Option, attr: &syn::Attribute) -> Option {\n if !attr.path().is_ident(\"doc\") {\n return None;\n }\n\n let syn::Meta::NameValue(name_value) = &attr.meta else {\n return None;\n };\n\n let syn::Expr::Lit(expr_lit) = &name_value.value else {\n return None;\n };\n\n let syn::Lit::Str(lit_str) = &expr_lit.lit else {\n return None;\n };\n\n let content = lit_str.value().trim().to_string();\n match (existing_docs, content) {\n (Some(mut existing_docs), content) if !content.is_empty() => {\n existing_docs.push('\\n');\n existing_docs.push_str(&content);\n Some(existing_docs)\n }\n (Some(existing_docs), _) => Some(existing_docs),\n (None, content) if !content.is_empty() => Some(content),\n _ => None,\n }\n}\n\npub fn tool(attr: TokenStream, input: TokenStream) -> syn::Result {\n let attribute = if attr.is_empty() {\n Default::default()\n } else {\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n ToolAttribute::from_list(&attr_args)?\n };\n let mut fn_item = syn::parse2::(input.clone())?;\n let fn_ident = &fn_item.sig.ident;\n\n let tool_attr_fn_ident = format_ident!(\"{}_tool_attr\", fn_ident);\n let input_schema_expr = if let Some(input_schema) = attribute.input_schema {\n input_schema\n } else {\n // try to find some parameters wrapper in the function\n let params_ty = fn_item.sig.inputs.iter().find_map(|input| {\n if let syn::FnArg::Typed(pat_type) = input {\n if let syn::Type::Path(type_path) = &*pat_type.ty {\n if type_path\n .path\n .segments\n .last()\n .is_some_and(|type_name| type_name.ident == \"Parameters\")\n {\n return Some(pat_type.ty.clone());\n }\n }\n }\n None\n });\n if let Some(params_ty) = params_ty {\n // if found, use the Parameters schema\n syn::parse2::(quote! {\n rmcp::handler::server::tool::cached_schema_for_type::<#params_ty>()\n })?\n } else {\n // if not found, use the default EmptyObject schema\n syn::parse2::(quote! 
{\n rmcp::handler::server::tool::cached_schema_for_type::()\n })?\n }\n };\n let annotations_expr = if let Some(annotations) = attribute.annotations {\n let ToolAnnotationsAttribute {\n title,\n read_only_hint,\n destructive_hint,\n idempotent_hint,\n open_world_hint,\n } = annotations;\n fn wrap_option(x: Option) -> TokenStream {\n x.map(|x| quote! {Some(#x.into())})\n .unwrap_or(quote! { None })\n }\n let title = wrap_option(title);\n let read_only_hint = wrap_option(read_only_hint);\n let destructive_hint = wrap_option(destructive_hint);\n let idempotent_hint = wrap_option(idempotent_hint);\n let open_world_hint = wrap_option(open_world_hint);\n let token_stream = quote! {\n Some(rmcp::model::ToolAnnotations {\n title: #title,\n read_only_hint: #read_only_hint,\n destructive_hint: #destructive_hint,\n idempotent_hint: #idempotent_hint,\n open_world_hint: #open_world_hint,\n })\n };\n syn::parse2::(token_stream)?\n } else {\n none_expr()\n };\n let resolved_tool_attr = ResolvedToolAttribute {\n name: attribute.name.unwrap_or_else(|| fn_ident.to_string()),\n description: attribute\n .description\n .or_else(|| fn_item.attrs.iter().fold(None, extract_doc_line)),\n input_schema: input_schema_expr,\n annotations: annotations_expr,\n };\n let tool_attr_fn = resolved_tool_attr.into_fn(tool_attr_fn_ident)?;\n // modify the the input function\n if fn_item.sig.asyncness.is_some() {\n // 1. remove asyncness from sig\n // 2. make return type: `std::pin::Pin + Send + '_>>`\n // 3. make body: { Box::pin(async move { #body }) }\n let new_output = syn::parse2::({\n let mut lt = quote! { 'static };\n if let Some(receiver) = fn_item.sig.receiver() {\n if let Some((_, receiver_lt)) = receiver.reference.as_ref() {\n if let Some(receiver_lt) = receiver_lt {\n lt = quote! { #receiver_lt };\n } else {\n lt = quote! { '_ };\n }\n }\n }\n match &fn_item.sig.output {\n syn::ReturnType::Default => {\n quote! 
{ -> std::pin::Pin + Send + #lt>> }\n }\n syn::ReturnType::Type(_, ty) => {\n quote! { -> std::pin::Pin + Send + #lt>> }\n }\n }\n })?;\n let prev_block = &fn_item.block;\n let new_block = syn::parse2::(quote! {\n { Box::pin(async move #prev_block ) }\n })?;\n fn_item.sig.asyncness = None;\n fn_item.sig.output = new_output;\n fn_item.block = new_block;\n }\n Ok(quote! {\n #tool_attr_fn\n #fn_item\n })\n}\n\n#[cfg(test)]\nmod test {\n use super::*;\n #[test]\n fn test_trait_tool_macro() -> syn::Result<()> {\n let attr = quote! {\n name = \"direct-annotated-tool\",\n annotations(title = \"Annotated Tool\", read_only_hint = true)\n };\n let input = quote! {\n async fn async_method(&self, Parameters(Request { fields }): Parameters) {\n drop(fields)\n }\n };\n let _input = tool(attr, input)?;\n\n Ok(())\n }\n\n #[test]\n fn test_doc_comment_description() -> syn::Result<()> {\n let attr = quote! {}; // No explicit description\n let input = quote! {\n /// This is a test description from doc comments\n /// with multiple lines\n fn test_function(&self) -> Result<(), Error> {\n Ok(())\n }\n };\n let result = tool(attr, input)?;\n\n // The output should contain the description from doc comments\n let result_str = result.to_string();\n assert!(result_str.contains(\"This is a test description from doc comments\"));\n assert!(result_str.contains(\"with multiple lines\"));\n\n Ok(())\n }\n\n #[test]\n fn test_explicit_description_priority() -> syn::Result<()> {\n let attr = quote! {\n description = \"Explicit description has priority\"\n };\n let input = quote! 
{\n /// Doc comment description that should be ignored\n fn test_function(&self) -> Result<(), Error> {\n Ok(())\n }\n };\n let result = tool(attr, input)?;\n\n // The output should contain the explicit description\n let result_str = result.to_string();\n assert!(result_str.contains(\"Explicit description has priority\"));\n Ok(())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/sink_stream.rs", "use std::sync::Arc;\n\nuse futures::{Sink, Stream};\nuse tokio::sync::Mutex;\n\nuse super::{IntoTransport, Transport};\nuse crate::service::{RxJsonRpcMessage, ServiceRole, TxJsonRpcMessage};\n\npub struct SinkStreamTransport {\n stream: St,\n sink: Arc>,\n}\n\nimpl SinkStreamTransport {\n pub fn new(sink: Si, stream: St) -> Self {\n Self {\n stream,\n sink: Arc::new(Mutex::new(sink)),\n }\n }\n}\n\nimpl Transport for SinkStreamTransport\nwhere\n St: Send + Stream> + Unpin,\n Si: Send + Sink> + Unpin + 'static,\n Si::Error: std::error::Error + Send + Sync + 'static,\n{\n type Error = Si::Error;\n\n fn send(\n &mut self,\n item: TxJsonRpcMessage,\n ) -> impl Future> + Send + 'static {\n use futures::SinkExt;\n let lock = self.sink.clone();\n async move {\n let mut write = lock.lock().await;\n write.send(item).await\n }\n }\n\n fn receive(&mut self) -> impl Future>> {\n use futures::StreamExt;\n self.stream.next()\n }\n\n async fn close(&mut self) -> Result<(), Self::Error> {\n Ok(())\n }\n}\n\npub enum TransportAdapterSinkStream {}\n\nimpl IntoTransport for (Si, St)\nwhere\n Role: ServiceRole,\n Si: Send + Sink> + Unpin + 'static,\n St: Send + Stream> + Unpin + 'static,\n Si::Error: std::error::Error + Send + Sync + 'static,\n{\n fn into_transport(self) -> impl Transport + 'static {\n SinkStreamTransport::new(self.0, self.1)\n }\n}\n\npub enum TransportAdapterAsyncCombinedRW {}\nimpl IntoTransport for S\nwhere\n Role: ServiceRole,\n S: Sink> + Stream> + Send + 'static,\n S::Error: std::error::Error + Send + Sync + 'static,\n{\n fn into_transport(self) -> impl Transport + 
'static {\n use futures::StreamExt;\n IntoTransport::::into_transport(self.split())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/serde_impl.rs", "use std::borrow::Cow;\n\nuse serde::{Deserialize, Serialize};\n\nuse super::{\n Extensions, Meta, Notification, NotificationNoParam, Request, RequestNoParam,\n RequestOptionalParam,\n};\n#[derive(Serialize, Deserialize)]\nstruct WithMeta<'a, P> {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n _meta: Option>,\n #[serde(flatten)]\n _rest: P,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct Proxy<'a, M, P> {\n method: M,\n params: WithMeta<'a, P>,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct ProxyOptionalParam<'a, M, P> {\n method: M,\n params: Option>,\n}\n\n#[derive(Serialize, Deserialize)]\nstruct ProxyNoParam {\n method: M,\n}\n\nimpl Serialize for Request\nwhere\n M: Serialize,\n R: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n Proxy::serialize(\n &Proxy {\n method: &self.method,\n params: WithMeta {\n _rest: &self.params,\n _meta,\n },\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M, R> Deserialize<'de> for Request\nwhere\n M: Deserialize<'de>,\n R: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = Proxy::deserialize(deserializer)?;\n let _meta = body.params._meta.map(|m| m.into_owned());\n let mut extensions = Extensions::new();\n if let Some(meta) = _meta {\n extensions.insert(meta);\n }\n Ok(Request {\n extensions,\n method: body.method,\n params: body.params._rest,\n })\n }\n}\n\nimpl Serialize for RequestOptionalParam\nwhere\n M: Serialize,\n R: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n Proxy::serialize(\n &Proxy {\n method: &self.method,\n params: 
WithMeta {\n _rest: &self.params,\n _meta,\n },\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M, R> Deserialize<'de> for RequestOptionalParam\nwhere\n M: Deserialize<'de>,\n R: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = ProxyOptionalParam::<'_, _, Option>::deserialize(deserializer)?;\n let mut params = None;\n let mut _meta = None;\n if let Some(body_params) = body.params {\n params = body_params._rest;\n _meta = body_params._meta.map(|m| m.into_owned());\n }\n let mut extensions = Extensions::new();\n if let Some(meta) = _meta {\n extensions.insert(meta);\n }\n Ok(RequestOptionalParam {\n extensions,\n method: body.method,\n params,\n })\n }\n}\n\nimpl Serialize for RequestNoParam\nwhere\n M: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n ProxyNoParam::serialize(\n &ProxyNoParam {\n method: &self.method,\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M> Deserialize<'de> for RequestNoParam\nwhere\n M: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = ProxyNoParam::<_>::deserialize(deserializer)?;\n let extensions = Extensions::new();\n Ok(RequestNoParam {\n extensions,\n method: body.method,\n })\n }\n}\n\nimpl Serialize for Notification\nwhere\n M: Serialize,\n R: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n Proxy::serialize(\n &Proxy {\n method: &self.method,\n params: WithMeta {\n _rest: &self.params,\n _meta,\n },\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M, R> Deserialize<'de> for Notification\nwhere\n M: Deserialize<'de>,\n R: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let 
body = Proxy::deserialize(deserializer)?;\n let _meta = body.params._meta.map(|m| m.into_owned());\n let mut extensions = Extensions::new();\n if let Some(meta) = _meta {\n extensions.insert(meta);\n }\n Ok(Notification {\n extensions,\n method: body.method,\n params: body.params._rest,\n })\n }\n}\n\nimpl Serialize for NotificationNoParam\nwhere\n M: Serialize,\n{\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let extensions = &self.extensions;\n let _meta = extensions.get::().map(Cow::Borrowed);\n ProxyNoParam::serialize(\n &ProxyNoParam {\n method: &self.method,\n },\n serializer,\n )\n }\n}\n\nimpl<'de, M> Deserialize<'de> for NotificationNoParam\nwhere\n M: Deserialize<'de>,\n{\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let body = ProxyNoParam::<_>::deserialize(deserializer)?;\n let extensions = Extensions::new();\n Ok(NotificationNoParam {\n extensions,\n method: body.method,\n })\n }\n}\n\n#[cfg(test)]\nmod test {\n use serde_json::json;\n\n use crate::model::ListToolsRequest;\n\n #[test]\n fn test_deserialize_lost_tools_request() {\n let _req: ListToolsRequest = serde_json::from_value(json!(\n {\n \"method\": \"tools/list\",\n }\n ))\n .unwrap();\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/client.rs", "pub mod progress;\nuse crate::{\n error::ErrorData as McpError,\n model::*,\n service::{NotificationContext, RequestContext, RoleClient, Service, ServiceRole},\n};\n\nimpl Service for H {\n async fn handle_request(\n &self,\n request: ::PeerReq,\n context: RequestContext,\n ) -> Result<::Resp, McpError> {\n match request {\n ServerRequest::PingRequest(_) => self.ping(context).await.map(ClientResult::empty),\n ServerRequest::CreateMessageRequest(request) => self\n .create_message(request.params, context)\n .await\n .map(ClientResult::CreateMessageResult),\n ServerRequest::ListRootsRequest(_) => self\n .list_roots(context)\n .await\n .map(ClientResult::ListRootsResult),\n 
}\n }\n\n async fn handle_notification(\n &self,\n notification: ::PeerNot,\n context: NotificationContext,\n ) -> Result<(), McpError> {\n match notification {\n ServerNotification::CancelledNotification(notification) => {\n self.on_cancelled(notification.params, context).await\n }\n ServerNotification::ProgressNotification(notification) => {\n self.on_progress(notification.params, context).await\n }\n ServerNotification::LoggingMessageNotification(notification) => {\n self.on_logging_message(notification.params, context).await\n }\n ServerNotification::ResourceUpdatedNotification(notification) => {\n self.on_resource_updated(notification.params, context).await\n }\n ServerNotification::ResourceListChangedNotification(_notification_no_param) => {\n self.on_resource_list_changed(context).await\n }\n ServerNotification::ToolListChangedNotification(_notification_no_param) => {\n self.on_tool_list_changed(context).await\n }\n ServerNotification::PromptListChangedNotification(_notification_no_param) => {\n self.on_prompt_list_changed(context).await\n }\n };\n Ok(())\n }\n\n fn get_info(&self) -> ::Info {\n self.get_info()\n }\n}\n\n#[allow(unused_variables)]\npub trait ClientHandler: Sized + Send + Sync + 'static {\n fn ping(\n &self,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(()))\n }\n\n fn create_message(\n &self,\n params: CreateMessageRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(\n McpError::method_not_found::(),\n ))\n }\n\n fn list_roots(\n &self,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListRootsResult::default()))\n }\n\n fn on_cancelled(\n &self,\n params: CancelledNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_progress(\n &self,\n params: ProgressNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n 
std::future::ready(())\n }\n fn on_logging_message(\n &self,\n params: LoggingMessageNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_resource_updated(\n &self,\n params: ResourceUpdatedNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_resource_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_tool_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_prompt_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n\n fn get_info(&self) -> ClientInfo {\n ClientInfo::default()\n }\n}\n\n/// Do nothing, with default client info.\nimpl ClientHandler for () {}\n\n/// Do nothing, with a specific client info.\nimpl ClientHandler for ClientInfo {\n fn get_info(&self) -> ClientInfo {\n self.clone()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/child_process.rs", "use std::process::Stdio;\n\nuse process_wrap::tokio::{TokioChildWrapper, TokioCommandWrap};\nuse tokio::{\n io::AsyncRead,\n process::{ChildStderr, ChildStdin, ChildStdout},\n};\n\nuse super::{IntoTransport, Transport};\nuse crate::service::ServiceRole;\n\n/// The parts of a child process.\ntype ChildProcessParts = (\n Box,\n ChildStdout,\n ChildStdin,\n Option,\n);\n\n/// Extract the stdio handles from a spawned child.\n/// Returns `(child, stdout, stdin, stderr)` where `stderr` is `Some` only\n/// if the process was spawned with `Stdio::piped()`.\n#[inline]\nfn child_process(mut child: Box) -> std::io::Result {\n let child_stdin = match child.inner_mut().stdin().take() {\n Some(stdin) => stdin,\n None => return Err(std::io::Error::other(\"stdin was already taken\")),\n };\n let child_stdout = match child.inner_mut().stdout().take() {\n Some(stdout) => stdout,\n None 
=> return Err(std::io::Error::other(\"stdout was already taken\")),\n };\n let child_stderr = child.inner_mut().stderr().take();\n Ok((child, child_stdout, child_stdin, child_stderr))\n}\n\npub struct TokioChildProcess {\n child: ChildWithCleanup,\n child_stdin: ChildStdin,\n child_stdout: ChildStdout,\n}\n\npub struct ChildWithCleanup {\n inner: Box,\n}\n\nimpl Drop for ChildWithCleanup {\n fn drop(&mut self) {\n if let Err(e) = self.inner.start_kill() {\n tracing::warn!(\"Failed to kill child process: {e}\");\n }\n }\n}\n\n// we hold the child process with stdout, for it's easier to implement AsyncRead\npin_project_lite::pin_project! {\n pub struct TokioChildProcessOut {\n child: ChildWithCleanup,\n #[pin]\n child_stdout: ChildStdout,\n }\n}\n\nimpl TokioChildProcessOut {\n /// Get the process ID of the child process.\n pub fn id(&self) -> Option {\n self.child.inner.id()\n }\n}\n\nimpl AsyncRead for TokioChildProcessOut {\n fn poll_read(\n self: std::pin::Pin<&mut Self>,\n cx: &mut std::task::Context<'_>,\n buf: &mut tokio::io::ReadBuf<'_>,\n ) -> std::task::Poll> {\n self.project().child_stdout.poll_read(cx, buf)\n }\n}\n\nimpl TokioChildProcess {\n /// Convenience: spawn with default `piped` stdio\n pub fn new(command: impl Into) -> std::io::Result {\n let (proc, _ignored) = TokioChildProcessBuilder::new(command).spawn()?;\n Ok(proc)\n }\n\n /// Builder entry-point allowing fine-grained stdio control.\n pub fn builder(command: impl Into) -> TokioChildProcessBuilder {\n TokioChildProcessBuilder::new(command)\n }\n\n /// Get the process ID of the child process.\n pub fn id(&self) -> Option {\n self.child.inner.id()\n }\n\n /// Split this helper into a reader (stdout) and writer (stdin).\n pub fn split(self) -> (TokioChildProcessOut, ChildStdin) {\n let TokioChildProcess {\n child,\n child_stdin,\n child_stdout,\n } = self;\n (\n TokioChildProcessOut {\n child,\n child_stdout,\n },\n child_stdin,\n )\n }\n}\n\n/// Builder for `TokioChildProcess` allowing custom 
`Stdio` configuration.\npub struct TokioChildProcessBuilder {\n cmd: TokioCommandWrap,\n stdin: Stdio,\n stdout: Stdio,\n stderr: Stdio,\n}\n\nimpl TokioChildProcessBuilder {\n fn new(cmd: impl Into) -> Self {\n Self {\n cmd: cmd.into(),\n stdin: Stdio::piped(),\n stdout: Stdio::piped(),\n stderr: Stdio::inherit(),\n }\n }\n\n /// Override the child stdin configuration.\n pub fn stdin(mut self, io: impl Into) -> Self {\n self.stdin = io.into();\n self\n }\n /// Override the child stdout configuration.\n pub fn stdout(mut self, io: impl Into) -> Self {\n self.stdout = io.into();\n self\n }\n /// Override the child stderr configuration.\n pub fn stderr(mut self, io: impl Into) -> Self {\n self.stderr = io.into();\n self\n }\n\n /// Spawn the child process. Returns the transport plus an optional captured stderr handle.\n pub fn spawn(mut self) -> std::io::Result<(TokioChildProcess, Option)> {\n self.cmd\n .command_mut()\n .stdin(self.stdin)\n .stdout(self.stdout)\n .stderr(self.stderr);\n\n let (child, stdout, stdin, stderr_opt) = child_process(self.cmd.spawn()?)?;\n\n let proc = TokioChildProcess {\n child: ChildWithCleanup { inner: child },\n child_stdin: stdin,\n child_stdout: stdout,\n };\n Ok((proc, stderr_opt))\n }\n}\n\nimpl IntoTransport for TokioChildProcess {\n fn into_transport(self) -> impl Transport + 'static {\n IntoTransport::::into_transport(\n self.split(),\n )\n }\n}\n\npub trait ConfigureCommandExt {\n fn configure(self, f: impl FnOnce(&mut Self)) -> Self;\n}\n\nimpl ConfigureCommandExt for tokio::process::Command {\n fn configure(mut self, f: impl FnOnce(&mut Self)) -> Self {\n f(&mut self);\n self\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/client/progress.rs", "use std::{collections::HashMap, sync::Arc};\n\nuse futures::{Stream, StreamExt};\nuse tokio::sync::RwLock;\nuse tokio_stream::wrappers::ReceiverStream;\n\nuse crate::model::{ProgressNotificationParam, ProgressToken};\ntype Dispatcher =\n Arc>>>;\n\n/// A dispatcher for progress 
notifications.\n#[derive(Debug, Clone, Default)]\npub struct ProgressDispatcher {\n pub(crate) dispatcher: Dispatcher,\n}\n\nimpl ProgressDispatcher {\n const CHANNEL_SIZE: usize = 16;\n pub fn new() -> Self {\n Self::default()\n }\n\n /// Handle a progress notification by sending it to the appropriate subscriber\n pub async fn handle_notification(&self, notification: ProgressNotificationParam) {\n let token = ¬ification.progress_token;\n if let Some(sender) = self.dispatcher.read().await.get(token).cloned() {\n let send_result = sender.send(notification).await;\n if let Err(e) = send_result {\n tracing::warn!(\"Failed to send progress notification: {e}\");\n }\n }\n }\n\n /// Subscribe to progress notifications for a specific token.\n ///\n /// If you drop the returned `ProgressSubscriber`, it will automatically unsubscribe from notifications for that token.\n pub async fn subscribe(&self, progress_token: ProgressToken) -> ProgressSubscriber {\n let (sender, receiver) = tokio::sync::mpsc::channel(Self::CHANNEL_SIZE);\n self.dispatcher\n .write()\n .await\n .insert(progress_token.clone(), sender);\n let receiver = ReceiverStream::new(receiver);\n ProgressSubscriber {\n progress_token,\n receiver,\n dispacher: self.dispatcher.clone(),\n }\n }\n\n /// Unsubscribe from progress notifications for a specific token.\n pub async fn unsubscribe(&self, token: &ProgressToken) {\n self.dispatcher.write().await.remove(token);\n }\n\n /// Clear all dispachter.\n pub async fn clear(&self) {\n let mut dispacher = self.dispatcher.write().await;\n dispacher.clear();\n }\n}\n\npub struct ProgressSubscriber {\n pub(crate) progress_token: ProgressToken,\n pub(crate) receiver: ReceiverStream,\n pub(crate) dispacher: Dispatcher,\n}\n\nimpl ProgressSubscriber {\n pub fn progress_token(&self) -> &ProgressToken {\n &self.progress_token\n }\n}\n\nimpl Stream for ProgressSubscriber {\n type Item = ProgressNotificationParam;\n\n fn poll_next(\n mut self: std::pin::Pin<&mut Self>,\n cx: &mut 
std::task::Context<'_>,\n ) -> std::task::Poll> {\n self.receiver.poll_next_unpin(cx)\n }\n\n fn size_hint(&self) -> (usize, Option) {\n self.receiver.size_hint()\n }\n}\n\nimpl Drop for ProgressSubscriber {\n fn drop(&mut self) {\n let token = self.progress_token.clone();\n self.receiver.close();\n let dispatcher = self.dispacher.clone();\n tokio::spawn(async move {\n let mut dispacher = dispatcher.write_owned().await;\n dispacher.remove(&token);\n });\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/extension.rs", "//! A container for those extra data could be carried on request or notification.\n//!\n//! This file is copied and modified from crate [http](https://github.com/hyperium/http).\n//!\n//! - Original code license: \n//! - Original code: \nuse std::{\n any::{Any, TypeId},\n collections::HashMap,\n fmt,\n hash::{BuildHasherDefault, Hasher},\n};\n\ntype AnyMap = HashMap, BuildHasherDefault>;\n\n// With TypeIds as keys, there's no need to hash them. They are already hashes\n// themselves, coming from the compiler. The IdHasher just holds the u64 of\n// the TypeId, and then returns it, instead of doing any bit fiddling.\n#[derive(Default)]\nstruct IdHasher(u64);\n\nimpl Hasher for IdHasher {\n fn write(&mut self, _: &[u8]) {\n unreachable!(\"TypeId calls write_u64\");\n }\n\n #[inline]\n fn write_u64(&mut self, id: u64) {\n self.0 = id;\n }\n\n #[inline]\n fn finish(&self) -> u64 {\n self.0\n }\n}\n\n/// A type map of protocol extensions.\n///\n/// `Extensions` can be used by `Request` `Notification` and `Response` to store\n/// extra data derived from the underlying protocol.\n#[derive(Clone, Default)]\npub struct Extensions {\n // If extensions are never used, no need to carry around an empty HashMap.\n // That's 3 words. 
Instead, this is only 1 word.\n map: Option>,\n}\n\nimpl Extensions {\n /// Create an empty `Extensions`.\n #[inline]\n pub const fn new() -> Extensions {\n Extensions { map: None }\n }\n\n /// Insert a type into this `Extensions`.\n ///\n /// If a extension of this type already existed, it will\n /// be returned and replaced with the new one.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert!(ext.insert(5i32).is_none());\n /// assert!(ext.insert(4u8).is_none());\n /// assert_eq!(ext.insert(9i32), Some(5i32));\n /// ```\n pub fn insert(&mut self, val: T) -> Option {\n self.map\n .get_or_insert_with(Box::default)\n .insert(TypeId::of::(), Box::new(val))\n .and_then(|boxed| boxed.into_any().downcast().ok().map(|boxed| *boxed))\n }\n\n /// Get a reference to a type previously inserted on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert!(ext.get::().is_none());\n /// ext.insert(5i32);\n ///\n /// assert_eq!(ext.get::(), Some(&5i32));\n /// ```\n pub fn get(&self) -> Option<&T> {\n self.map\n .as_ref()\n .and_then(|map| map.get(&TypeId::of::()))\n .and_then(|boxed| (**boxed).as_any().downcast_ref())\n }\n\n /// Get a mutable reference to a type previously inserted on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// ext.insert(String::from(\"Hello\"));\n /// ext.get_mut::().unwrap().push_str(\" World\");\n ///\n /// assert_eq!(ext.get::().unwrap(), \"Hello World\");\n /// ```\n pub fn get_mut(&mut self) -> Option<&mut T> {\n self.map\n .as_mut()\n .and_then(|map| map.get_mut(&TypeId::of::()))\n .and_then(|boxed| (**boxed).as_any_mut().downcast_mut())\n }\n\n /// Get a mutable reference to a type, inserting `value` if not already present on this\n /// `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # 
use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// *ext.get_or_insert(1i32) += 2;\n ///\n /// assert_eq!(*ext.get::().unwrap(), 3);\n /// ```\n pub fn get_or_insert(&mut self, value: T) -> &mut T {\n self.get_or_insert_with(|| value)\n }\n\n /// Get a mutable reference to a type, inserting the value created by `f` if not already present\n /// on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// *ext.get_or_insert_with(|| 1i32) += 2;\n ///\n /// assert_eq!(*ext.get::().unwrap(), 3);\n /// ```\n pub fn get_or_insert_with T>(\n &mut self,\n f: F,\n ) -> &mut T {\n let out = self\n .map\n .get_or_insert_with(Box::default)\n .entry(TypeId::of::())\n .or_insert_with(|| Box::new(f()));\n (**out).as_any_mut().downcast_mut().unwrap()\n }\n\n /// Get a mutable reference to a type, inserting the type's default value if not already present\n /// on this `Extensions`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// *ext.get_or_insert_default::() += 2;\n ///\n /// assert_eq!(*ext.get::().unwrap(), 2);\n /// ```\n pub fn get_or_insert_default(&mut self) -> &mut T {\n self.get_or_insert_with(T::default)\n }\n\n /// Remove a type from this `Extensions`.\n ///\n /// If a extension of this type existed, it will be returned.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// ext.insert(5i32);\n /// assert_eq!(ext.remove::(), Some(5i32));\n /// assert!(ext.get::().is_none());\n /// ```\n pub fn remove(&mut self) -> Option {\n self.map\n .as_mut()\n .and_then(|map| map.remove(&TypeId::of::()))\n .and_then(|boxed| boxed.into_any().downcast().ok().map(|boxed| *boxed))\n }\n\n /// Clear the `Extensions` of all inserted extensions.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = 
Extensions::new();\n /// ext.insert(5i32);\n /// ext.clear();\n ///\n /// assert!(ext.get::().is_none());\n /// ```\n #[inline]\n pub fn clear(&mut self) {\n if let Some(ref mut map) = self.map {\n map.clear();\n }\n }\n\n /// Check whether the extension set is empty or not.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert!(ext.is_empty());\n /// ext.insert(5i32);\n /// assert!(!ext.is_empty());\n /// ```\n #[inline]\n pub fn is_empty(&self) -> bool {\n self.map.as_ref().is_none_or(|map| map.is_empty())\n }\n\n /// Get the number of extensions available.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext = Extensions::new();\n /// assert_eq!(ext.len(), 0);\n /// ext.insert(5i32);\n /// assert_eq!(ext.len(), 1);\n /// ```\n #[inline]\n pub fn len(&self) -> usize {\n self.map.as_ref().map_or(0, |map| map.len())\n }\n\n /// Extends `self` with another `Extensions`.\n ///\n /// If an instance of a specific type exists in both, the one in `self` is overwritten with the\n /// one from `other`.\n ///\n /// # Example\n ///\n /// ```\n /// # use rmcp::model::Extensions;\n /// let mut ext_a = Extensions::new();\n /// ext_a.insert(8u8);\n /// ext_a.insert(16u16);\n ///\n /// let mut ext_b = Extensions::new();\n /// ext_b.insert(4u8);\n /// ext_b.insert(\"hello\");\n ///\n /// ext_a.extend(ext_b);\n /// assert_eq!(ext_a.len(), 3);\n /// assert_eq!(ext_a.get::(), Some(&4u8));\n /// assert_eq!(ext_a.get::(), Some(&16u16));\n /// assert_eq!(ext_a.get::<&'static str>().copied(), Some(\"hello\"));\n /// ```\n pub fn extend(&mut self, other: Self) {\n if let Some(other) = other.map {\n if let Some(map) = &mut self.map {\n map.extend(*other);\n } else {\n self.map = Some(other);\n }\n }\n }\n}\n\nimpl fmt::Debug for Extensions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n f.debug_struct(\"Extensions\").finish()\n }\n}\n\ntrait AnyClone: Any {\n fn 
clone_box(&self) -> Box;\n fn as_any(&self) -> &dyn Any;\n fn as_any_mut(&mut self) -> &mut dyn Any;\n fn into_any(self: Box) -> Box;\n}\n\nimpl AnyClone for T {\n fn clone_box(&self) -> Box {\n Box::new(self.clone())\n }\n\n fn as_any(&self) -> &dyn Any {\n self\n }\n\n fn as_any_mut(&mut self) -> &mut dyn Any {\n self\n }\n\n fn into_any(self: Box) -> Box {\n self\n }\n}\n\nimpl Clone for Box {\n fn clone(&self) -> Self {\n (**self).clone_box()\n }\n}\n\n#[test]\nfn test_extensions() {\n #[derive(Clone, Debug, PartialEq)]\n struct MyType(i32);\n\n let mut extensions = Extensions::new();\n\n extensions.insert(5i32);\n extensions.insert(MyType(10));\n\n assert_eq!(extensions.get(), Some(&5i32));\n assert_eq!(extensions.get_mut(), Some(&mut 5i32));\n\n let ext2 = extensions.clone();\n\n assert_eq!(extensions.remove::(), Some(5i32));\n assert!(extensions.get::().is_none());\n\n // clone still has it\n assert_eq!(ext2.get(), Some(&5i32));\n assert_eq!(ext2.get(), Some(&MyType(10)));\n\n assert_eq!(extensions.get::(), None);\n assert_eq!(extensions.get(), Some(&MyType(10)));\n}\n"], ["/rust-sdk/crates/rmcp/src/service/tower.rs", "use std::{future::poll_fn, marker::PhantomData};\n\nuse tower_service::Service as TowerService;\n\nuse super::NotificationContext;\nuse crate::service::{RequestContext, Service, ServiceRole};\n\npub struct TowerHandler {\n pub service: S,\n pub info: R::Info,\n role: PhantomData,\n}\n\nimpl TowerHandler {\n pub fn new(service: S, info: R::Info) -> Self {\n Self {\n service,\n role: PhantomData,\n info,\n }\n }\n}\n\nimpl Service for TowerHandler\nwhere\n S: TowerService + Sync + Send + Clone + 'static,\n S::Error: Into,\n S::Future: Send,\n{\n async fn handle_request(\n &self,\n request: R::PeerReq,\n _context: RequestContext,\n ) -> Result {\n let mut service = self.service.clone();\n poll_fn(|cx| service.poll_ready(cx))\n .await\n .map_err(Into::into)?;\n let resp = service.call(request).await.map_err(Into::into)?;\n Ok(resp)\n }\n\n fn 
handle_notification(\n &self,\n _notification: R::PeerNot,\n _context: NotificationContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(()))\n }\n\n fn get_info(&self) -> R::Info {\n self.info.clone()\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server.rs", "use crate::{\n error::ErrorData as McpError,\n model::*,\n service::{NotificationContext, RequestContext, RoleServer, Service, ServiceRole},\n};\n\nmod resource;\npub mod router;\npub mod tool;\npub mod wrapper;\nimpl Service for H {\n async fn handle_request(\n &self,\n request: ::PeerReq,\n context: RequestContext,\n ) -> Result<::Resp, McpError> {\n match request {\n ClientRequest::InitializeRequest(request) => self\n .initialize(request.params, context)\n .await\n .map(ServerResult::InitializeResult),\n ClientRequest::PingRequest(_request) => {\n self.ping(context).await.map(ServerResult::empty)\n }\n ClientRequest::CompleteRequest(request) => self\n .complete(request.params, context)\n .await\n .map(ServerResult::CompleteResult),\n ClientRequest::SetLevelRequest(request) => self\n .set_level(request.params, context)\n .await\n .map(ServerResult::empty),\n ClientRequest::GetPromptRequest(request) => self\n .get_prompt(request.params, context)\n .await\n .map(ServerResult::GetPromptResult),\n ClientRequest::ListPromptsRequest(request) => self\n .list_prompts(request.params, context)\n .await\n .map(ServerResult::ListPromptsResult),\n ClientRequest::ListResourcesRequest(request) => self\n .list_resources(request.params, context)\n .await\n .map(ServerResult::ListResourcesResult),\n ClientRequest::ListResourceTemplatesRequest(request) => self\n .list_resource_templates(request.params, context)\n .await\n .map(ServerResult::ListResourceTemplatesResult),\n ClientRequest::ReadResourceRequest(request) => self\n .read_resource(request.params, context)\n .await\n .map(ServerResult::ReadResourceResult),\n ClientRequest::SubscribeRequest(request) => self\n .subscribe(request.params, context)\n 
.await\n .map(ServerResult::empty),\n ClientRequest::UnsubscribeRequest(request) => self\n .unsubscribe(request.params, context)\n .await\n .map(ServerResult::empty),\n ClientRequest::CallToolRequest(request) => self\n .call_tool(request.params, context)\n .await\n .map(ServerResult::CallToolResult),\n ClientRequest::ListToolsRequest(request) => self\n .list_tools(request.params, context)\n .await\n .map(ServerResult::ListToolsResult),\n }\n }\n\n async fn handle_notification(\n &self,\n notification: ::PeerNot,\n context: NotificationContext,\n ) -> Result<(), McpError> {\n match notification {\n ClientNotification::CancelledNotification(notification) => {\n self.on_cancelled(notification.params, context).await\n }\n ClientNotification::ProgressNotification(notification) => {\n self.on_progress(notification.params, context).await\n }\n ClientNotification::InitializedNotification(_notification) => {\n self.on_initialized(context).await\n }\n ClientNotification::RootsListChangedNotification(_notification) => {\n self.on_roots_list_changed(context).await\n }\n };\n Ok(())\n }\n\n fn get_info(&self) -> ::Info {\n self.get_info()\n }\n}\n\n#[allow(unused_variables)]\npub trait ServerHandler: Sized + Send + Sync + 'static {\n fn ping(\n &self,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(()))\n }\n // handle requests\n fn initialize(\n &self,\n request: InitializeRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n if context.peer.peer_info().is_none() {\n context.peer.set_peer_info(request);\n }\n std::future::ready(Ok(self.get_info()))\n }\n fn complete(\n &self,\n request: CompleteRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn set_level(\n &self,\n request: SetLevelRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn 
get_prompt(\n &self,\n request: GetPromptRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn list_prompts(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListPromptsResult::default()))\n }\n fn list_resources(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListResourcesResult::default()))\n }\n fn list_resource_templates(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListResourceTemplatesResult::default()))\n }\n fn read_resource(\n &self,\n request: ReadResourceRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(\n McpError::method_not_found::(),\n ))\n }\n fn subscribe(\n &self,\n request: SubscribeRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn unsubscribe(\n &self,\n request: UnsubscribeRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn call_tool(\n &self,\n request: CallToolRequestParam,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Err(McpError::method_not_found::()))\n }\n fn list_tools(\n &self,\n request: Option,\n context: RequestContext,\n ) -> impl Future> + Send + '_ {\n std::future::ready(Ok(ListToolsResult::default()))\n }\n\n fn on_cancelled(\n &self,\n notification: CancelledNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_progress(\n &self,\n notification: ProgressNotificationParam,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n fn on_initialized(\n &self,\n context: 
NotificationContext,\n ) -> impl Future + Send + '_ {\n tracing::info!(\"client initialized\");\n std::future::ready(())\n }\n fn on_roots_list_changed(\n &self,\n context: NotificationContext,\n ) -> impl Future + Send + '_ {\n std::future::ready(())\n }\n\n fn get_info(&self) -> ServerInfo {\n ServerInfo::default()\n }\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/tool_handler.rs", "use darling::{FromMeta, ast::NestedMeta};\nuse proc_macro2::TokenStream;\nuse quote::{ToTokens, quote};\nuse syn::{Expr, ImplItem, ItemImpl};\n\n#[derive(FromMeta)]\n#[darling(default)]\npub struct ToolHandlerAttribute {\n pub router: Expr,\n}\n\nimpl Default for ToolHandlerAttribute {\n fn default() -> Self {\n Self {\n router: syn::parse2(quote! {\n self.tool_router\n })\n .unwrap(),\n }\n }\n}\n\npub fn tool_handler(attr: TokenStream, input: TokenStream) -> syn::Result {\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n let ToolHandlerAttribute { router } = ToolHandlerAttribute::from_list(&attr_args)?;\n let mut item_impl = syn::parse2::(input.clone())?;\n let tool_call_fn = quote! {\n async fn call_tool(\n &self,\n request: rmcp::model::CallToolRequestParam,\n context: rmcp::service::RequestContext,\n ) -> Result {\n let tcc = rmcp::handler::server::tool::ToolCallContext::new(self, request, context);\n #router.call(tcc).await\n }\n };\n let tool_list_fn = quote! {\n async fn list_tools(\n &self,\n _request: Option,\n _context: rmcp::service::RequestContext,\n ) -> Result {\n Ok(rmcp::model::ListToolsResult::with_all_items(#router.list_all()))\n }\n };\n let tool_call_fn = syn::parse2::(tool_call_fn)?;\n let tool_list_fn = syn::parse2::(tool_list_fn)?;\n item_impl.items.push(tool_call_fn);\n item_impl.items.push(tool_list_fn);\n Ok(item_impl.into_token_stream())\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/tool_router.rs", "//! ```ignore\n//! #[rmcp::tool_router(router)]\n//! impl Handler {\n//!\n//! }\n//! 
```\n//!\n\nuse darling::{FromMeta, ast::NestedMeta};\nuse proc_macro2::TokenStream;\nuse quote::{ToTokens, format_ident, quote};\nuse syn::{Ident, ImplItem, ItemImpl, Visibility};\n\n#[derive(FromMeta)]\n#[darling(default)]\npub struct ToolRouterAttribute {\n pub router: Ident,\n pub vis: Option,\n}\n\nimpl Default for ToolRouterAttribute {\n fn default() -> Self {\n Self {\n router: format_ident!(\"tool_router\"),\n vis: None,\n }\n }\n}\n\npub fn tool_router(attr: TokenStream, input: TokenStream) -> syn::Result {\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n let ToolRouterAttribute { router, vis } = ToolRouterAttribute::from_list(&attr_args)?;\n let mut item_impl = syn::parse2::(input.clone())?;\n // find all function marked with `#[rmcp::tool]`\n let tool_attr_fns: Vec<_> = item_impl\n .items\n .iter()\n .filter_map(|item| {\n if let syn::ImplItem::Fn(fn_item) = item {\n fn_item\n .attrs\n .iter()\n .any(|attr| {\n attr.path()\n .segments\n .last()\n .is_some_and(|seg| seg.ident == \"tool\")\n })\n .then_some(&fn_item.sig.ident)\n } else {\n None\n }\n })\n .collect();\n let mut routers = vec![];\n for handler in tool_attr_fns {\n let tool_attr_fn_ident = format_ident!(\"{handler}_tool_attr\");\n routers.push(quote! {\n .with_route((Self::#tool_attr_fn_ident(), Self::#handler))\n })\n }\n let router_fn = syn::parse2::(quote! {\n #vis fn #router() -> rmcp::handler::server::router::tool::ToolRouter {\n rmcp::handler::server::router::tool::ToolRouter::::new()\n #(#routers)*\n }\n })?;\n item_impl.items.push(router_fn);\n Ok(item_impl.into_token_stream())\n}\n\n#[cfg(test)]\nmod test {\n use super::*;\n #[test]\n fn test_router_attr() -> Result<(), Box> {\n let attr = quote! 
{\n router = test_router,\n };\n let attr_args = NestedMeta::parse_meta_list(attr)?;\n let ToolRouterAttribute { router, vis } = ToolRouterAttribute::from_list(&attr_args)?;\n println!(\"router: {}\", router);\n if let Some(vis) = vis {\n println!(\"visibility: {}\", vis.to_token_stream());\n } else {\n println!(\"visibility: None\");\n }\n Ok(())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/meta.rs", "use std::ops::{Deref, DerefMut};\n\nuse serde::{Deserialize, Serialize};\nuse serde_json::Value;\n\nuse super::{\n ClientNotification, ClientRequest, Extensions, JsonObject, JsonRpcMessage, NumberOrString,\n ProgressToken, ServerNotification, ServerRequest,\n};\n\npub trait GetMeta {\n fn get_meta_mut(&mut self) -> &mut Meta;\n fn get_meta(&self) -> &Meta;\n}\n\npub trait GetExtensions {\n fn extensions(&self) -> &Extensions;\n fn extensions_mut(&mut self) -> &mut Extensions;\n}\n\nmacro_rules! variant_extension {\n (\n $Enum: ident {\n $($variant: ident)*\n }\n ) => {\n impl GetExtensions for $Enum {\n fn extensions(&self) -> &Extensions {\n match self {\n $(\n $Enum::$variant(v) => &v.extensions,\n )*\n }\n }\n fn extensions_mut(&mut self) -> &mut Extensions {\n match self {\n $(\n $Enum::$variant(v) => &mut v.extensions,\n )*\n }\n }\n }\n impl GetMeta for $Enum {\n fn get_meta_mut(&mut self) -> &mut Meta {\n self.extensions_mut().get_or_insert_default()\n }\n fn get_meta(&self) -> &Meta {\n self.extensions().get::().unwrap_or(Meta::static_empty())\n }\n }\n };\n}\n\nvariant_extension! {\n ClientRequest {\n PingRequest\n InitializeRequest\n CompleteRequest\n SetLevelRequest\n GetPromptRequest\n ListPromptsRequest\n ListResourcesRequest\n ListResourceTemplatesRequest\n ReadResourceRequest\n SubscribeRequest\n UnsubscribeRequest\n CallToolRequest\n ListToolsRequest\n }\n}\n\nvariant_extension! {\n ServerRequest {\n PingRequest\n CreateMessageRequest\n ListRootsRequest\n }\n}\n\nvariant_extension! 
{\n ClientNotification {\n CancelledNotification\n ProgressNotification\n InitializedNotification\n RootsListChangedNotification\n }\n}\n\nvariant_extension! {\n ServerNotification {\n CancelledNotification\n ProgressNotification\n LoggingMessageNotification\n ResourceUpdatedNotification\n ResourceListChangedNotification\n ToolListChangedNotification\n PromptListChangedNotification\n }\n}\n#[derive(Debug, Serialize, Deserialize, Clone, Default)]\n#[serde(transparent)]\npub struct Meta(pub JsonObject);\nconst PROGRESS_TOKEN_FIELD: &str = \"progressToken\";\nimpl Meta {\n pub fn new() -> Self {\n Self(JsonObject::new())\n }\n\n pub(crate) fn static_empty() -> &'static Self {\n static EMPTY: std::sync::OnceLock = std::sync::OnceLock::new();\n EMPTY.get_or_init(Default::default)\n }\n\n pub fn get_progress_token(&self) -> Option {\n self.0.get(PROGRESS_TOKEN_FIELD).and_then(|v| match v {\n Value::String(s) => Some(ProgressToken(NumberOrString::String(s.to_string().into()))),\n Value::Number(n) => n\n .as_u64()\n .map(|n| ProgressToken(NumberOrString::Number(n as u32))),\n _ => None,\n })\n }\n\n pub fn set_progress_token(&mut self, token: ProgressToken) {\n match token.0 {\n NumberOrString::String(ref s) => self.0.insert(\n PROGRESS_TOKEN_FIELD.to_string(),\n Value::String(s.to_string()),\n ),\n NumberOrString::Number(n) => self\n .0\n .insert(PROGRESS_TOKEN_FIELD.to_string(), Value::Number(n.into())),\n };\n }\n\n pub fn extend(&mut self, other: Meta) {\n for (k, v) in other.0.into_iter() {\n self.0.insert(k, v);\n }\n }\n}\n\nimpl Deref for Meta {\n type Target = JsonObject;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl DerefMut for Meta {\n fn deref_mut(&mut self) -> &mut Self::Target {\n &mut self.0\n }\n}\n\nimpl JsonRpcMessage\nwhere\n Req: GetExtensions,\n Noti: GetExtensions,\n{\n pub fn insert_extension(&mut self, value: T) {\n match self {\n JsonRpcMessage::Request(json_rpc_request) => {\n 
json_rpc_request.request.extensions_mut().insert(value);\n }\n JsonRpcMessage::Notification(json_rpc_notification) => {\n json_rpc_notification\n .notification\n .extensions_mut()\n .insert(value);\n }\n JsonRpcMessage::BatchRequest(json_rpc_batch_request_items) => {\n for item in json_rpc_batch_request_items {\n match item {\n super::JsonRpcBatchRequestItem::Request(json_rpc_request) => {\n json_rpc_request\n .request\n .extensions_mut()\n .insert(value.clone());\n }\n super::JsonRpcBatchRequestItem::Notification(json_rpc_notification) => {\n json_rpc_notification\n .notification\n .extensions_mut()\n .insert(value.clone());\n }\n }\n }\n }\n _ => {}\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/error.rs", "use std::{borrow::Cow, fmt::Display};\n\nuse crate::ServiceError;\npub use crate::model::ErrorData;\n#[deprecated(\n note = \"Use `rmcp::ErrorData` instead, `rmcp::ErrorData` could become `RmcpError` in the future.\"\n)]\npub type Error = ErrorData;\nimpl Display for ErrorData {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.code.0, self.message)?;\n if let Some(data) = &self.data {\n write!(f, \"({})\", data)?;\n }\n Ok(())\n }\n}\n\nimpl std::error::Error for ErrorData {}\n\n/// This is an unified error type for the errors could be returned by the service.\n#[derive(Debug, thiserror::Error)]\npub enum RmcpError {\n #[error(\"Service error: {0}\")]\n Service(#[from] ServiceError),\n #[cfg(feature = \"client\")]\n #[error(\"Client initialization error: {0}\")]\n ClientInitialize(#[from] crate::service::ClientInitializeError),\n #[cfg(feature = \"server\")]\n #[error(\"Server initialization error: {0}\")]\n ServerInitialize(#[from] crate::service::ServerInitializeError),\n #[error(\"Runtime error: {0}\")]\n Runtime(#[from] tokio::task::JoinError),\n #[error(\"Transport creation error: {error}\")]\n // TODO: Maybe we can introduce something like `TryIntoTransport` to auto wrap transport type,\n // but it could be 
an breaking change, so we could do it in the future.\n TransportCreation {\n into_transport_type_name: Cow<'static, str>,\n into_transport_type_id: std::any::TypeId,\n #[source]\n error: Box,\n },\n // and cancellation shouldn't be an error?\n}\n\nimpl RmcpError {\n pub fn transport_creation(\n error: impl Into>,\n ) -> Self {\n RmcpError::TransportCreation {\n into_transport_type_id: std::any::TypeId::of::(),\n into_transport_type_name: std::any::type_name::().into(),\n error: error.into(),\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/model/annotated.rs", "use std::ops::{Deref, DerefMut};\n\nuse chrono::{DateTime, Utc};\nuse serde::{Deserialize, Serialize};\n\nuse super::{\n RawAudioContent, RawContent, RawEmbeddedResource, RawImageContent, RawResource,\n RawResourceTemplate, RawTextContent, Role,\n};\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Annotations {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub audience: Option>,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub priority: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub timestamp: Option>,\n}\n\nimpl Annotations {\n /// Creates a new Annotations instance specifically for resources\n /// optional priority, and a timestamp (defaults to now if None)\n pub fn for_resource(priority: f32, timestamp: DateTime) -> Self {\n assert!(\n (0.0..=1.0).contains(&priority),\n \"Priority {priority} must be between 0.0 and 1.0\"\n );\n Annotations {\n priority: Some(priority),\n timestamp: Some(timestamp),\n audience: None,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Annotated {\n #[serde(flatten)]\n pub raw: T,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub annotations: Option,\n}\n\nimpl Deref for Annotated {\n type 
Target = T;\n fn deref(&self) -> &Self::Target {\n &self.raw\n }\n}\n\nimpl DerefMut for Annotated {\n fn deref_mut(&mut self) -> &mut Self::Target {\n &mut self.raw\n }\n}\n\nimpl Annotated {\n pub fn new(raw: T, annotations: Option) -> Self {\n Self { raw, annotations }\n }\n pub fn remove_annotation(&mut self) -> Option {\n self.annotations.take()\n }\n pub fn audience(&self) -> Option<&Vec> {\n self.annotations.as_ref().and_then(|a| a.audience.as_ref())\n }\n pub fn priority(&self) -> Option {\n self.annotations.as_ref().and_then(|a| a.priority)\n }\n pub fn timestamp(&self) -> Option> {\n self.annotations.as_ref().and_then(|a| a.timestamp)\n }\n pub fn with_audience(self, audience: Vec) -> Annotated\n where\n Self: Sized,\n {\n if let Some(annotations) = self.annotations {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n audience: Some(audience),\n ..annotations\n }),\n }\n } else {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n audience: Some(audience),\n priority: None,\n timestamp: None,\n }),\n }\n }\n }\n pub fn with_priority(self, priority: f32) -> Annotated\n where\n Self: Sized,\n {\n if let Some(annotations) = self.annotations {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n priority: Some(priority),\n ..annotations\n }),\n }\n } else {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n priority: Some(priority),\n timestamp: None,\n audience: None,\n }),\n }\n }\n }\n pub fn with_timestamp(self, timestamp: DateTime) -> Annotated\n where\n Self: Sized,\n {\n if let Some(annotations) = self.annotations {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n timestamp: Some(timestamp),\n ..annotations\n }),\n }\n } else {\n Annotated {\n raw: self.raw,\n annotations: Some(Annotations {\n timestamp: Some(timestamp),\n priority: None,\n audience: None,\n }),\n }\n }\n }\n pub fn with_timestamp_now(self) -> Annotated\n where\n Self: Sized,\n {\n 
self.with_timestamp(Utc::now())\n }\n}\n\nmod sealed {\n pub trait Sealed {}\n}\nmacro_rules! annotate {\n ($T: ident) => {\n impl sealed::Sealed for $T {}\n impl AnnotateAble for $T {}\n };\n}\n\nannotate!(RawContent);\nannotate!(RawTextContent);\nannotate!(RawImageContent);\nannotate!(RawAudioContent);\nannotate!(RawEmbeddedResource);\nannotate!(RawResource);\nannotate!(RawResourceTemplate);\n\npub trait AnnotateAble: sealed::Sealed {\n fn optional_annotate(self, annotations: Option) -> Annotated\n where\n Self: Sized,\n {\n Annotated::new(self, annotations)\n }\n fn annotate(self, annotations: Annotations) -> Annotated\n where\n Self: Sized,\n {\n Annotated::new(self, Some(annotations))\n }\n fn no_annotation(self) -> Annotated\n where\n Self: Sized,\n {\n Annotated::new(self, None)\n }\n fn with_audience(self, audience: Vec) -> Annotated\n where\n Self: Sized,\n {\n self.annotate(Annotations {\n audience: Some(audience),\n ..Default::default()\n })\n }\n fn with_priority(self, priority: f32) -> Annotated\n where\n Self: Sized,\n {\n self.annotate(Annotations {\n priority: Some(priority),\n ..Default::default()\n })\n }\n fn with_timestamp(self, timestamp: DateTime) -> Annotated\n where\n Self: Sized,\n {\n self.annotate(Annotations {\n timestamp: Some(timestamp),\n ..Default::default()\n })\n }\n fn with_timestamp_now(self) -> Annotated\n where\n Self: Sized,\n {\n self.with_timestamp(Utc::now())\n }\n}\n"], ["/rust-sdk/crates/rmcp-macros/src/lib.rs", "#[allow(unused_imports)]\nuse proc_macro::TokenStream;\n\nmod tool;\nmod tool_handler;\nmod tool_router;\n/// # tool\n///\n/// This macro is used to mark a function as a tool handler.\n///\n/// This will generate a function that return the attribute of this tool, with type `rmcp::model::Tool`.\n///\n/// ## Usage\n///\n/// | feied | type | usage |\n/// | :- | :- | :- |\n/// | `name` | `String` | The name of the tool. If not provided, it defaults to the function name. 
|\n/// | `description` | `String` | A description of the tool. The document of this function will be used. |\n/// | `input_schema` | `Expr` | A JSON Schema object defining the expected parameters for the tool. If not provide, if will use the json schema of its argument with type `Parameters` |\n/// | `annotations` | `ToolAnnotationsAttribute` | Additional tool information. Defaults to `None`. |\n///\n/// ## Example\n///\n/// ```rust,ignore\n/// #[tool(name = \"my_tool\", description = \"This is my tool\", annotations(title = \"我的工具\", read_only_hint = true))]\n/// pub async fn my_tool(param: Parameters) {\n/// // handling tool request\n/// }\n/// ```\n#[proc_macro_attribute]\npub fn tool(attr: TokenStream, input: TokenStream) -> TokenStream {\n tool::tool(attr.into(), input.into())\n .unwrap_or_else(|err| err.to_compile_error())\n .into()\n}\n\n/// # tool_router\n///\n/// This macro is used to generate a tool router based on functions marked with `#[rmcp::tool]` in an implementation block.\n///\n/// It creates a function that returns a `ToolRouter` instance.\n///\n/// In most case, you need to add a field for handler to store the router information and initialize it when creating handler, or store it with a static variable.\n/// ## Usage\n///\n/// | feied | type | usage |\n/// | :- | :- | :- |\n/// | `router` | `Ident` | The name of the router function to be generated. Defaults to `tool_router`. |\n/// | `vis` | `Visibility` | The visibility of the generated router function. Defaults to empty. 
|\n///\n/// ## Example\n///\n/// ```rust,ignore\n/// #[tool_router]\n/// impl MyToolHandler {\n/// #[tool]\n/// pub fn my_tool() {\n/// \n/// }\n///\n/// pub fn new() -> Self {\n/// Self {\n/// // the default name of tool router will be `tool_router`\n/// tool_router: Self::tool_router(),\n/// }\n/// }\n/// }\n/// ```\n///\n/// Or specify the visibility and router name, which would be helpful when you want to combine multiple routers into one:\n///\n/// ```rust,ignore\n/// mod a {\n/// #[tool_router(router = tool_router_a, vis = pub)]\n/// impl MyToolHandler {\n/// #[tool]\n/// fn my_tool_a() {\n/// \n/// }\n/// }\n/// }\n///\n/// mod b {\n/// #[tool_router(router = tool_router_b, vis = pub)]\n/// impl MyToolHandler {\n/// #[tool]\n/// fn my_tool_b() {\n/// \n/// }\n/// }\n/// }\n///\n/// impl MyToolHandler {\n/// fn new() -> Self {\n/// Self {\n/// tool_router: self::tool_router_a() + self::tool_router_b(),\n/// }\n/// }\n/// }\n/// ```\n#[proc_macro_attribute]\npub fn tool_router(attr: TokenStream, input: TokenStream) -> TokenStream {\n tool_router::tool_router(attr.into(), input.into())\n .unwrap_or_else(|err| err.to_compile_error())\n .into()\n}\n\n/// # tool_handler\n///\n/// This macro will generate the handler for `tool_call` and `list_tools` methods in the implementation block, by using an existing `ToolRouter` instance.\n///\n/// ## Usage\n///\n/// | field | type | usage |\n/// | :- | :- | :- |\n/// | `router` | `Expr` | The expression to access the `ToolRouter` instance. Defaults to `self.tool_router`. 
|\n/// ## Example\n/// ```rust,ignore\n/// #[tool_handler]\n/// impl ServerHandler for MyToolHandler {\n/// // ...implement other handler\n/// }\n/// ```\n///\n/// or using a custom router expression:\n/// ```rust,ignore\n/// #[tool_handler(router = self.get_router().await)]\n/// impl ServerHandler for MyToolHandler {\n/// // ...implement other handler\n/// }\n/// ```\n///\n/// ## Explain\n///\n/// This macro will be expended to something like this:\n/// ```rust,ignore\n/// impl ServerHandler for MyToolHandler {\n/// async fn call_tool(\n/// &self,\n/// request: CallToolRequestParam,\n/// context: RequestContext,\n/// ) -> Result {\n/// let tcc = ToolCallContext::new(self, request, context);\n/// self.tool_router.call(tcc).await\n/// }\n///\n/// async fn list_tools(\n/// &self,\n/// _request: Option,\n/// _context: RequestContext,\n/// ) -> Result {\n/// let items = self.tool_router.list_all();\n/// Ok(ListToolsResult::with_all_items(items))\n/// }\n/// }\n/// ```\n#[proc_macro_attribute]\npub fn tool_handler(attr: TokenStream, input: TokenStream) -> TokenStream {\n tool_handler::tool_handler(attr.into(), input.into())\n .unwrap_or_else(|err| err.to_compile_error())\n .into()\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common.rs", "#[cfg(any(\n feature = \"transport-streamable-http-server\",\n feature = \"transport-sse-server\"\n))]\npub mod server_side_http;\n\npub mod http_header;\n\n#[cfg(feature = \"__reqwest\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"reqwest\")))]\nmod reqwest;\n\n#[cfg(feature = \"client-side-sse\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client-side-sse\")))]\npub mod client_side_sse;\n\n#[cfg(feature = \"auth\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"auth\")))]\npub mod auth;\n"], ["/rust-sdk/crates/rmcp/src/model/prompt.rs", "use base64::engine::{Engine, general_purpose::STANDARD as BASE64_STANDARD};\nuse serde::{Deserialize, Serialize};\n\nuse super::{\n AnnotateAble, Annotations, RawEmbeddedResource, RawImageContent,\n 
content::{EmbeddedResource, ImageContent},\n resource::ResourceContents,\n};\n\n/// A prompt that can be used to generate text from a model\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Prompt {\n /// The name of the prompt\n pub name: String,\n /// Optional description of what the prompt does\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n /// Optional arguments that can be passed to customize the prompt\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub arguments: Option>,\n}\n\nimpl Prompt {\n /// Create a new prompt with the given name, description and arguments\n pub fn new(\n name: N,\n description: Option,\n arguments: Option>,\n ) -> Self\n where\n N: Into,\n D: Into,\n {\n Prompt {\n name: name.into(),\n description: description.map(Into::into),\n arguments,\n }\n }\n}\n\n/// Represents a prompt argument that can be passed to customize the prompt\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptArgument {\n /// The name of the argument\n pub name: String,\n /// A description of what the argument is used for\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n /// Whether this argument is required\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub required: Option,\n}\n\n/// Represents the role of a message sender in a prompt conversation\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum PromptMessageRole {\n User,\n Assistant,\n}\n\n/// Content types that can be included in prompt messages\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(tag = \"type\", rename_all = \"camelCase\")]\n#[cfg_attr(feature = 
\"schemars\", derive(schemars::JsonSchema))]\npub enum PromptMessageContent {\n /// Plain text content\n Text { text: String },\n /// Image content with base64-encoded data\n Image {\n #[serde(flatten)]\n image: ImageContent,\n },\n /// Embedded server-side resource\n Resource { resource: EmbeddedResource },\n}\n\nimpl PromptMessageContent {\n pub fn text(text: impl Into) -> Self {\n Self::Text { text: text.into() }\n }\n}\n\n/// A message in a prompt conversation\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct PromptMessage {\n /// The role of the message sender\n pub role: PromptMessageRole,\n /// The content of the message\n pub content: PromptMessageContent,\n}\n\nimpl PromptMessage {\n /// Create a new text message with the given role and text content\n pub fn new_text>(role: PromptMessageRole, text: S) -> Self {\n Self {\n role,\n content: PromptMessageContent::Text { text: text.into() },\n }\n }\n #[cfg(feature = \"base64\")]\n pub fn new_image(\n role: PromptMessageRole,\n data: &[u8],\n mime_type: &str,\n annotations: Option,\n ) -> Self {\n let mime_type = mime_type.into();\n\n let base64 = BASE64_STANDARD.encode(data);\n\n Self {\n role,\n content: PromptMessageContent::Image {\n image: RawImageContent {\n data: base64,\n mime_type,\n }\n .optional_annotate(annotations),\n },\n }\n }\n\n /// Create a new resource message\n pub fn new_resource(\n role: PromptMessageRole,\n uri: String,\n mime_type: String,\n text: Option,\n annotations: Option,\n ) -> Self {\n let resource_contents = ResourceContents::TextResourceContents {\n uri,\n mime_type: Some(mime_type),\n text: text.unwrap_or_default(),\n };\n\n Self {\n role,\n content: PromptMessageContent::Resource {\n resource: RawEmbeddedResource {\n resource: resource_contents,\n }\n .optional_annotate(annotations),\n },\n }\n }\n}\n\n/// A template for a prompt\n#[derive(Debug, Serialize, Deserialize)]\npub struct 
PromptTemplate {\n pub id: String,\n pub template: String,\n pub arguments: Vec,\n}\n\n/// A template for a prompt argument, this should be identical to PromptArgument\n#[derive(Debug, Serialize, Deserialize)]\npub struct PromptArgumentTemplate {\n pub name: String,\n pub description: Option,\n pub required: Option,\n}\n"], ["/rust-sdk/crates/rmcp/src/model/tool.rs", "use std::{borrow::Cow, sync::Arc};\n\n/// Tools represent a routine that a server can execute\n/// Tool calls represent requests from the client to execute one\nuse serde::{Deserialize, Serialize};\nuse serde_json::Value;\n\nuse super::JsonObject;\n\n/// A tool that can be used by a model.\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct Tool {\n /// The name of the tool\n pub name: Cow<'static, str>,\n /// A description of what the tool does\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n /// A JSON Schema object defining the expected parameters for the tool\n pub input_schema: Arc,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n /// Optional additional tool information.\n pub annotations: Option,\n}\n\n/// Additional properties describing a Tool to clients.\n///\n/// NOTE: all properties in ToolAnnotations are **hints**.\n/// They are not guaranteed to provide a faithful description of\n/// tool behavior (including descriptive properties like `title`).\n///\n/// Clients should never make tool use decisions based on ToolAnnotations\n/// received from untrusted servers.\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct ToolAnnotations {\n /// A human-readable title for the tool.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub title: Option,\n\n /// If true, the tool does not modify its 
environment.\n ///\n /// Default: false\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub read_only_hint: Option,\n\n /// If true, the tool may perform destructive updates to its environment.\n /// If false, the tool performs only additive updates.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: true\n /// A human-readable description of the tool's purpose.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub destructive_hint: Option,\n\n /// If true, calling the tool repeatedly with the same arguments\n /// will have no additional effect on the its environment.\n ///\n /// (This property is meaningful only when `readOnlyHint == false`)\n ///\n /// Default: false.\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub idempotent_hint: Option,\n\n /// If true, this tool may interact with an \"open world\" of external\n /// entities. If false, the tool's domain of interaction is closed.\n /// For example, the world of a web search tool is open, whereas that\n /// of a memory tool is not.\n ///\n /// Default: true\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub open_world_hint: Option,\n}\n\nimpl ToolAnnotations {\n pub fn new() -> Self {\n Self::default()\n }\n pub fn with_title(title: T) -> Self\n where\n T: Into,\n {\n ToolAnnotations {\n title: Some(title.into()),\n ..Self::default()\n }\n }\n pub fn read_only(self, read_only: bool) -> Self {\n ToolAnnotations {\n read_only_hint: Some(read_only),\n ..self\n }\n }\n pub fn destructive(self, destructive: bool) -> Self {\n ToolAnnotations {\n destructive_hint: Some(destructive),\n ..self\n }\n }\n pub fn idempotent(self, idempotent: bool) -> Self {\n ToolAnnotations {\n idempotent_hint: Some(idempotent),\n ..self\n }\n }\n pub fn open_world(self, open_world: bool) -> Self {\n ToolAnnotations {\n open_world_hint: Some(open_world),\n ..self\n }\n }\n\n /// If not set, defaults to true.\n pub fn is_destructive(&self) -> bool {\n 
self.destructive_hint.unwrap_or(true)\n }\n\n /// If not set, defaults to false.\n pub fn is_idempotent(&self) -> bool {\n self.idempotent_hint.unwrap_or(false)\n }\n}\n\nimpl Tool {\n /// Create a new tool with the given name and description\n pub fn new(name: N, description: D, input_schema: S) -> Self\n where\n N: Into>,\n D: Into>,\n S: Into>,\n {\n Tool {\n name: name.into(),\n description: Some(description.into()),\n input_schema: input_schema.into(),\n annotations: None,\n }\n }\n\n pub fn annotate(self, annotations: ToolAnnotations) -> Self {\n Tool {\n annotations: Some(annotations),\n ..self\n }\n }\n\n /// Get the schema as json value\n pub fn schema_as_json_value(&self) -> Value {\n Value::Object(self.input_schema.as_ref().clone())\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/lib.rs", "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, allow(unused_attributes))]\n//! The official Rust SDK for the Model Context Protocol (MCP).\n//!\n//! The MCP is a protocol that allows AI assistants to communicate with other\n//! services. `rmcp` is the official Rust implementation of this protocol.\n//!\n//! There are two ways in which the library can be used, namely to build a\n//! server or to build a client.\n//!\n//! ## Server\n//!\n//! A server is a service that exposes capabilities. For example, a common\n//! use-case is for the server to make multiple tools available to clients such\n//! as Claude Desktop or the Cursor IDE.\n//!\n//! For example, to implement a server that has a tool that can count, you would\n//! make an object for that tool and add an implementation with the `#[tool_router]` macro:\n//!\n//! ```rust\n//! use std::sync::Arc;\n//! use rmcp::{ErrorData as McpError, model::*, tool, tool_router, handler::server::tool::ToolRouter};\n//! use tokio::sync::Mutex;\n//!\n//! #[derive(Clone)]\n//! pub struct Counter {\n//! counter: Arc>,\n//! tool_router: ToolRouter,\n//! }\n//!\n//! #[tool_router]\n//! impl Counter {\n//! fn new() -> Self {\n//! 
Self {\n//! counter: Arc::new(Mutex::new(0)),\n//! tool_router: Self::tool_router(),\n//! }\n//! }\n//!\n//! #[tool(description = \"Increment the counter by 1\")]\n//! async fn increment(&self) -> Result {\n//! let mut counter = self.counter.lock().await;\n//! *counter += 1;\n//! Ok(CallToolResult::success(vec![Content::text(\n//! counter.to_string(),\n//! )]))\n//! }\n//! }\n//! ```\n//!\n//! Next also implement [ServerHandler] for `Counter` and start the server inside\n//! `main` by calling `Counter::new().serve(...)`. See the examples directory in the repository for more information.\n//!\n//! ## Client\n//!\n//! A client can be used to interact with a server. Clients can be used to get a\n//! list of the available tools and to call them. For example, we can `uv` to\n//! start a MCP server in Python and then list the tools and call `git status`\n//! as follows:\n//!\n//! ```rust\n//! use anyhow::Result;\n//! use rmcp::{model::CallToolRequestParam, service::ServiceExt, transport::{TokioChildProcess, ConfigureCommandExt}};\n//! use tokio::process::Command;\n//!\n//! async fn client() -> Result<()> {\n//! let service = ().serve(TokioChildProcess::new(Command::new(\"uvx\").configure(|cmd| {\n//! cmd.arg(\"mcp-server-git\");\n//! }))?).await?;\n//!\n//! // Initialize\n//! let server_info = service.peer_info();\n//! println!(\"Connected to server: {server_info:#?}\");\n//!\n//! // List tools\n//! let tools = service.list_tools(Default::default()).await?;\n//! println!(\"Available tools: {tools:#?}\");\n//!\n//! // Call tool 'git_status' with arguments = {\"repo_path\": \".\"}\n//! let tool_result = service\n//! .call_tool(CallToolRequestParam {\n//! name: \"git_status\".into(),\n//! arguments: serde_json::json!({ \"repo_path\": \".\" }).as_object().cloned(),\n//! })\n//! .await?;\n//! println!(\"Tool result: {tool_result:#?}\");\n//!\n//! service.cancel().await?;\n//! Ok(())\n//! }\n//! 
```\nmod error;\n#[allow(deprecated)]\npub use error::{Error, ErrorData, RmcpError};\n\n/// Basic data types in MCP specification\npub mod model;\n#[cfg(any(feature = \"client\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(any(feature = \"client\", feature = \"server\"))))]\npub mod service;\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub use handler::client::ClientHandler;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub use handler::server::ServerHandler;\n#[cfg(any(feature = \"client\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(any(feature = \"client\", feature = \"server\"))))]\npub use service::{Peer, Service, ServiceError, ServiceExt};\n#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub use service::{RoleClient, serve_client};\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub use service::{RoleServer, serve_server};\n\npub mod handler;\npub mod transport;\n\n// re-export\n#[cfg(all(feature = \"macros\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(all(feature = \"macros\", feature = \"server\"))))]\npub use paste::paste;\n#[cfg(all(feature = \"macros\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(all(feature = \"macros\", feature = \"server\"))))]\npub use rmcp_macros::*;\n#[cfg(all(feature = \"macros\", feature = \"server\"))]\n#[cfg_attr(docsrs, doc(cfg(all(feature = \"macros\", feature = \"server\"))))]\npub use schemars;\n#[cfg(feature = \"macros\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"macros\")))]\npub use serde;\n#[cfg(feature = \"macros\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"macros\")))]\npub use serde_json;\n"], ["/rust-sdk/crates/rmcp/src/model/resource.rs", "use serde::{Deserialize, Serialize};\n\nuse super::Annotated;\n\n/// Represents a resource in the extension with metadata\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = 
\"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawResource {\n /// URI representing the resource location (e.g., \"file:///path/to/file\" or \"str:///content\")\n pub uri: String,\n /// Name of the resource\n pub name: String,\n /// Optional description of the resource\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n /// MIME type of the resource content (\"text\" or \"blob\")\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub mime_type: Option,\n\n /// The size of the raw resource content, in bytes (i.e., before base64 encoding or any tokenization), if known.\n ///\n /// This can be used by Hosts to display file sizes and estimate context window us\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub size: Option,\n}\n\npub type Resource = Annotated;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\")]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub struct RawResourceTemplate {\n pub uri_template: String,\n pub name: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub mime_type: Option,\n}\n\npub type ResourceTemplate = Annotated;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]\n#[serde(rename_all = \"camelCase\", untagged)]\n#[cfg_attr(feature = \"schemars\", derive(schemars::JsonSchema))]\npub enum ResourceContents {\n TextResourceContents {\n uri: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n mime_type: Option,\n text: String,\n },\n BlobResourceContents {\n uri: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n mime_type: Option,\n blob: String,\n },\n}\n\nimpl ResourceContents {\n pub fn text(text: impl Into, uri: impl Into) -> Self {\n Self::TextResourceContents {\n uri: uri.into(),\n mime_type: Some(\"text\".into()),\n text: text.into(),\n }\n 
}\n}\n\nimpl RawResource {\n /// Creates a new Resource from a URI with explicit mime type\n pub fn new(uri: impl Into, name: impl Into) -> Self {\n Self {\n uri: uri.into(),\n name: name.into(),\n description: None,\n mime_type: None,\n size: None,\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/handler/server/wrapper/json.rs", "use serde::Serialize;\n\nuse crate::model::IntoContents;\n\n/// Json wrapper\n///\n/// This is used to tell the SDK to serialize the inner value into json\npub struct Json(pub T);\n\nimpl IntoContents for Json\nwhere\n T: Serialize,\n{\n fn into_contents(self) -> Vec {\n let result = crate::model::Content::json(self.0);\n debug_assert!(\n result.is_ok(),\n \"Json wrapped content should be able to serialized into json\"\n );\n match result {\n Ok(content) => vec![content],\n Err(e) => {\n tracing::error!(\"failed to convert json content: {e}\");\n vec![]\n }\n }\n }\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/common/http_header.rs", "pub const HEADER_SESSION_ID: &str = \"Mcp-Session-Id\";\npub const HEADER_LAST_EVENT_ID: &str = \"Last-Event-Id\";\npub const EVENT_STREAM_MIME_TYPE: &str = \"text/event-stream\";\npub const JSON_MIME_TYPE: &str = \"application/json\";\n"], ["/rust-sdk/crates/rmcp/src/transport/common/auth.rs", "#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\nmod streamable_http_client;\n\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\nmod sse_client;\n"], ["/rust-sdk/crates/rmcp/src/transport/common/reqwest.rs", "#[cfg(feature = \"transport-streamable-http-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-client\")))]\nmod streamable_http_client;\n\n#[cfg(feature = \"transport-sse-client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-sse-client\")))]\nmod sse_client;\n"], ["/rust-sdk/crates/rmcp/src/transport/io.rs", "/// # StdIO 
Transport\n///\n/// Create a pair of [`tokio::io::Stdin`] and [`tokio::io::Stdout`].\npub fn stdio() -> (tokio::io::Stdin, tokio::io::Stdout) {\n (tokio::io::stdin(), tokio::io::stdout())\n}\n"], ["/rust-sdk/crates/rmcp/src/transport/streamable_http_server.rs", "pub mod session;\n#[cfg(feature = \"transport-streamable-http-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server\")))]\npub mod tower;\npub use session::{SessionId, SessionManager};\n#[cfg(feature = \"transport-streamable-http-server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"transport-streamable-http-server\")))]\npub use tower::{StreamableHttpServerConfig, StreamableHttpService};\n"], ["/rust-sdk/crates/rmcp/src/handler.rs", "#[cfg(feature = \"client\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"client\")))]\npub mod client;\n#[cfg(feature = \"server\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"server\")))]\npub mod server;\n"], ["/rust-sdk/crates/rmcp/src/handler/server/resource.rs", "\n"], ["/rust-sdk/crates/rmcp/src/handler/server/wrapper.rs", "mod json;\npub use json::*;\n"], ["/rust-sdk/crates/rmcp/src/transport/ws.rs", "// Maybe we don't really need a ws implementation?\n"], ["/rust-sdk/crates/rmcp/src/handler/server/router/prompt.rs", ""]], "task_instance_info": {"created_time": "2025-08-21 01:43:26", "created_task_model": "DeepSeek-R1", "function_skeleton": "async fn post_message (\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n auth_token: Option,\n ) -> Result<(), SseTransportError> {}", "code_description": "This code segment functions to: Send a structured data payload to a specified endpoint using HTTP POST, include authentication credentials if provided, and handle potential transmission errors while ensuring successful delivery confirmation."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\n async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n 
auth_token: Option,\n ) -> Result<(), SseTransportError> {\n let json_body = serde_json::to_vec(&message).map_err(SseTransportError::Serialization)?;\n \n let mut request_builder = self\n .post(uri.to_string())\n .header(reqwest::header::CONTENT_TYPE, \"application/json\")\n .body(json_body);\n \n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n\n let response = request_builder.send().await?;\n let _ = response.error_for_status()?;\n \n Ok(())\n }\n[TASK_END]\n```", "inference_time": "2025-08-21 01-43-39"}, "editdistance_info": {"edit_distance": 46.7532, "calculate_time": "2025-08-21 01:43:39", "true_code_clean": "async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n auth_token: Option,\n ) -> Result<(), SseTransportError> {\n let mut request_builder = self.post(uri.to_string()).json(&message);\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n request_builder\n .send()\n .await\n .and_then(|resp| resp.error_for_status())\n .map_err(SseTransportError::from)\n .map(drop)\n }", "predict_code_clean": "async fn post_message(\n &self,\n uri: Uri,\n message: crate::model::ClientJsonRpcMessage,\n auth_token: Option,\n ) -> Result<(), SseTransportError> {\n let json_body = serde_json::to_vec(&message).map_err(SseTransportError::Serialization)?;\n let mut request_builder = self\n .post(uri.to_string())\n .header(reqwest::header::CONTENT_TYPE, \"application/json\")\n .body(json_body);\n if let Some(auth_header) = auth_token {\n request_builder = request_builder.bearer_auth(auth_header);\n }\n let response = request_builder.send().await?;\n let _ = response.error_for_status()?;\n Ok(())\n }"}}